transaction: turn lack of locking into a hard failure (API)...
Pierre-Yves David
r29186:e0fc0ed4 default
@@ -1,1975 +1,1976 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

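# Hypothetical usage sketch: the helpers above all route through
# repo.unfiltered(), so a value computed once is shared by every filtered
# view of the same repository, e.g.:
#
#     class myrepo(localrepository):
#         @unfilteredmethod
#         def countall(self):
#             return len(self)  # always counts unfiltered revisions
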
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

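    # Hypothetical usage sketch for __getitem__: changeid may be None or
    # wdirrev for the working context, an integer/node/tag for a changeset,
    # or a slice (filtered revisions are skipped):
    #
    #     wctx = repo[None]    # workingctx
    #     ctx = repo['tip']    # changectx for tip
    #     ctxs = repo[0:5]     # list of changectx
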
    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

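    # Hypothetical usage sketch for revs(): the %-formatting escapes values
    # into the expression (see revset.formatspec), e.g.:
    #
    #     for rev in repo.revs('branch(%s) and not obsolete()', 'default'):
    #         repo.ui.write('%d\n' % rev)
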
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

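    # Hypothetical usage sketch for set():
    #
    #     for ctx in repo.set('heads(%s)', 'default'):
    #         repo.ui.write('%s\n' % ctx.hex())
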
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

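    # Usage sketch (mirroring the calls in _tag below): keyword arguments are
    # exposed to shell hooks as HG_* environment variables, e.g.:
    #
    #     repo.hook('pretag', throw=True, node=hex(node), tag=name,
    #               local=local)
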
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

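    # Hypothetical usage sketch for tag(); passing date=None commits with
    # the current date:
    #
    #     repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0',
    #              False, 'user@example.com', None)
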
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

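    # Hypothetical usage sketch for branchmap(); the returned cache maps
    # each branch name to its heads:
    #
    #     for branch, heads in repo.branchmap().iteritems():
    #         repo.ui.write('%s: %d head(s)\n' % (branch, len(heads)))
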
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

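    # Hypothetical usage sketch for known(), the wire-protocol membership
    # query; tipnode/bogusnode stand in for 20-byte binary node ids:
    #
    #     repo.known([tipnode, bogusnode])  # e.g. [True, False]
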
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

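    # Hypothetical usage sketch for filectx():
    #
    #     fctx = repo.filectx('README', changeid='tip')
    #     data = fctx.data()  # file contents at that changeset
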
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

943 def _filter(self, filterpats, filename, data):
943 def _filter(self, filterpats, filename, data):
944 for mf, fn, cmd in filterpats:
944 for mf, fn, cmd in filterpats:
945 if mf(filename):
945 if mf(filename):
946 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
946 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
947 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
947 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
948 break
948 break
949
949
950 return data
950 return data
951
951
952 @unfilteredpropertycache
952 @unfilteredpropertycache
953 def _encodefilterpats(self):
953 def _encodefilterpats(self):
954 return self._loadfilter('encode')
954 return self._loadfilter('encode')
955
955
956 @unfilteredpropertycache
956 @unfilteredpropertycache
957 def _decodefilterpats(self):
957 def _decodefilterpats(self):
958 return self._loadfilter('decode')
958 return self._loadfilter('decode')
959
959
960 def adddatafilter(self, name, filter):
960 def adddatafilter(self, name, filter):
961 self._datafilters[name] = filter
961 self._datafilters[name] = filter
962
962
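# Editor's sketch (not part of localrepo.py): registering a data filter.
# _filter() below invokes a filter as fn(data, params, ui=..., repo=...,
# filename=...), so new-style filters should accept keyword arguments.
# The name 'upper' and the function are assumptions for illustration; an
# [encode] entry whose command starts with 'upper' would select it.
def upperfilter(data, params, **kwargs):
    # params is the remainder of the configured command string
    return data.upper()

repo.adddatafilter('upper', upperfilter)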
963 def wread(self, filename):
963 def wread(self, filename):
964 if self._link(filename):
964 if self._link(filename):
965 data = self.wvfs.readlink(filename)
965 data = self.wvfs.readlink(filename)
966 else:
966 else:
967 data = self.wvfs.read(filename)
967 data = self.wvfs.read(filename)
968 return self._filter(self._encodefilterpats, filename, data)
968 return self._filter(self._encodefilterpats, filename, data)
969
969
970 def wwrite(self, filename, data, flags, backgroundclose=False):
970 def wwrite(self, filename, data, flags, backgroundclose=False):
971 """write ``data`` into ``filename`` in the working directory
971 """write ``data`` into ``filename`` in the working directory
972
972
973 This returns the length of the written (possibly decoded) data.
973 This returns the length of the written (possibly decoded) data.
974 """
974 """
975 data = self._filter(self._decodefilterpats, filename, data)
975 data = self._filter(self._decodefilterpats, filename, data)
976 if 'l' in flags:
976 if 'l' in flags:
977 self.wvfs.symlink(data, filename)
977 self.wvfs.symlink(data, filename)
978 else:
978 else:
979 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
979 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
980 if 'x' in flags:
980 if 'x' in flags:
981 self.wvfs.setflags(filename, False, True)
981 self.wvfs.setflags(filename, False, True)
982 return len(data)
982 return len(data)
983
983
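# Editor's sketch (not part of localrepo.py): wread() reads a working-copy
# file and applies the [encode] filters (repository form); wwrite() applies
# the [decode] filters and writes working-copy form back. The path is an
# assumption; '' means a regular file (no 'l'/'x' flags).
data = repo.wread('foo.txt')                # repository-form bytes
written = repo.wwrite('foo.txt', data, '')  # returns length written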
984 def wwritedata(self, filename, data):
984 def wwritedata(self, filename, data):
985 return self._filter(self._decodefilterpats, filename, data)
985 return self._filter(self._decodefilterpats, filename, data)
986
986
987 def currenttransaction(self):
987 def currenttransaction(self):
988 """return the current transaction or None if non exists"""
988 """return the current transaction or None if non exists"""
989 if self._transref:
989 if self._transref:
990 tr = self._transref()
990 tr = self._transref()
991 else:
991 else:
992 tr = None
992 tr = None
993
993
994 if tr and tr.running():
994 if tr and tr.running():
995 return tr
995 return tr
996 return None
996 return None
997
997
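# Editor's sketch (not part of localrepo.py): code that only wants to join
# an already-open transaction can test for one instead of opening its own.
# 'myext-flush' and flushfn are hypothetical; finalizer callbacks receive
# the transaction object, as with 'flush-fncache' below.
def flushfn(tr):
    pass  # write pending extension state here

tr = repo.currenttransaction()
if tr is not None:
    tr.addfinalize('myext-flush', flushfn)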
998 def transaction(self, desc, report=None):
998 def transaction(self, desc, report=None):
999 if (self.ui.configbool('devel', 'all-warnings')
999 if (self.ui.configbool('devel', 'all-warnings')
1000 or self.ui.configbool('devel', 'check-locks')):
1000 or self.ui.configbool('devel', 'check-locks')):
1001 l = self._lockref and self._lockref()
1001 l = self._lockref and self._lockref()
1002 if l is None or not l.held:
1002 if l is None or not l.held:
1003 self.ui.develwarn('transaction with no lock')
1003 raise RuntimeError('programming error: transaction requires '
1004 'locking')
1004 tr = self.currenttransaction()
1005 tr = self.currenttransaction()
1005 if tr is not None:
1006 if tr is not None:
1006 return tr.nest()
1007 return tr.nest()
1007
1008
1008 # abort here if the journal already exists
1009 # abort here if the journal already exists
1009 if self.svfs.exists("journal"):
1010 if self.svfs.exists("journal"):
1010 raise error.RepoError(
1011 raise error.RepoError(
1011 _("abandoned transaction found"),
1012 _("abandoned transaction found"),
1012 hint=_("run 'hg recover' to clean up transaction"))
1013 hint=_("run 'hg recover' to clean up transaction"))
1013
1014
1014 # make journal.dirstate contain in-memory changes at this point
1015 # make journal.dirstate contain in-memory changes at this point
1015 self.dirstate.write(None)
1016 self.dirstate.write(None)
1016
1017
1017 idbase = "%.40f#%f" % (random.random(), time.time())
1018 idbase = "%.40f#%f" % (random.random(), time.time())
1018 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1019 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1019 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1020 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1020
1021
1021 self._writejournal(desc)
1022 self._writejournal(desc)
1022 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1023 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1023 if report:
1024 if report:
1024 rp = report
1025 rp = report
1025 else:
1026 else:
1026 rp = self.ui.warn
1027 rp = self.ui.warn
1027 vfsmap = {'plain': self.vfs} # root of .hg/
1028 vfsmap = {'plain': self.vfs} # root of .hg/
1028 # we must avoid cyclic reference between repo and transaction.
1029 # we must avoid cyclic reference between repo and transaction.
1029 reporef = weakref.ref(self)
1030 reporef = weakref.ref(self)
1030 def validate(tr):
1031 def validate(tr):
1031 """will run pre-closing hooks"""
1032 """will run pre-closing hooks"""
1032 reporef().hook('pretxnclose', throw=True,
1033 reporef().hook('pretxnclose', throw=True,
1033 txnname=desc, **tr.hookargs)
1034 txnname=desc, **tr.hookargs)
1034 def releasefn(tr, success):
1035 def releasefn(tr, success):
1035 repo = reporef()
1036 repo = reporef()
1036 if success:
1037 if success:
1037 # this must be invoked explicitly here, because
1038 # this must be invoked explicitly here, because
1038 # in-memory changes aren't written out when closing the
1039 # in-memory changes aren't written out when closing the
1039 # transaction if tr.addfilegenerator (via
1040 # transaction if tr.addfilegenerator (via
1040 # dirstate.write or so) wasn't invoked while the
1041 # dirstate.write or so) wasn't invoked while the
1041 # transaction was running
1042 # transaction was running
1042 repo.dirstate.write(None)
1043 repo.dirstate.write(None)
1043 else:
1044 else:
1044 # prevent in-memory changes from being written out at
1045 # prevent in-memory changes from being written out at
1045 # the end of outer wlock scope or so
1046 # the end of outer wlock scope or so
1046 repo.dirstate.invalidate()
1047 repo.dirstate.invalidate()
1047
1048
1048 # discard all changes (including ones already written
1049 # discard all changes (including ones already written
1049 # out) in this transaction
1050 # out) in this transaction
1050 repo.vfs.rename('journal.dirstate', 'dirstate')
1051 repo.vfs.rename('journal.dirstate', 'dirstate')
1051
1052
1052 repo.invalidate(clearfilecache=True)
1053 repo.invalidate(clearfilecache=True)
1053
1054
1054 tr = transaction.transaction(rp, self.svfs, vfsmap,
1055 tr = transaction.transaction(rp, self.svfs, vfsmap,
1055 "journal",
1056 "journal",
1056 "undo",
1057 "undo",
1057 aftertrans(renames),
1058 aftertrans(renames),
1058 self.store.createmode,
1059 self.store.createmode,
1059 validator=validate,
1060 validator=validate,
1060 releasefn=releasefn)
1061 releasefn=releasefn)
1061
1062
1062 tr.hookargs['txnid'] = txnid
1063 tr.hookargs['txnid'] = txnid
1063 # note: writing the fncache only during finalize means that the file is
1064 # note: writing the fncache only during finalize means that the file is
1064 # outdated when running hooks. As fncache is used for streaming clones,
1065 # outdated when running hooks. As fncache is used for streaming clones,
1065 # this is not expected to break anything that happens during the hooks.
1066 # this is not expected to break anything that happens during the hooks.
1066 tr.addfinalize('flush-fncache', self.store.write)
1067 tr.addfinalize('flush-fncache', self.store.write)
1067 def txnclosehook(tr2):
1068 def txnclosehook(tr2):
1068 """To be run if transaction is successful, will schedule a hook run
1069 """To be run if transaction is successful, will schedule a hook run
1069 """
1070 """
1070 # Don't reference tr2 in hook() so we don't hold a reference.
1071 # Don't reference tr2 in hook() so we don't hold a reference.
1071 # This reduces memory consumption when there are multiple
1072 # This reduces memory consumption when there are multiple
1072 # transactions per lock. This can likely go away if issue5045
1073 # transactions per lock. This can likely go away if issue5045
1073 # fixes the function accumulation.
1074 # fixes the function accumulation.
1074 hookargs = tr2.hookargs
1075 hookargs = tr2.hookargs
1075
1076
1076 def hook():
1077 def hook():
1077 reporef().hook('txnclose', throw=False, txnname=desc,
1078 reporef().hook('txnclose', throw=False, txnname=desc,
1078 **hookargs)
1079 **hookargs)
1079 reporef()._afterlock(hook)
1080 reporef()._afterlock(hook)
1080 tr.addfinalize('txnclose-hook', txnclosehook)
1081 tr.addfinalize('txnclose-hook', txnclosehook)
1081 def txnaborthook(tr2):
1082 def txnaborthook(tr2):
1082 """To be run if transaction is aborted
1083 """To be run if transaction is aborted
1083 """
1084 """
1084 reporef().hook('txnabort', throw=False, txnname=desc,
1085 reporef().hook('txnabort', throw=False, txnname=desc,
1085 **tr2.hookargs)
1086 **tr2.hookargs)
1086 tr.addabort('txnabort-hook', txnaborthook)
1087 tr.addabort('txnabort-hook', txnaborthook)
1087 # avoid eager cache invalidation. in-memory data should be identical
1088 # avoid eager cache invalidation. in-memory data should be identical
1088 # to stored data if transaction has no error.
1089 # to stored data if transaction has no error.
1089 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1090 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1090 self._transref = weakref.ref(tr)
1091 self._transref = weakref.ref(tr)
1091 return tr
1092 return tr
1092
1093
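# Editor's sketch (not part of localrepo.py): with the develwarn replaced by
# a hard RuntimeError above, callers must hold the store lock before opening
# a transaction. The close/release pattern mirrors commitctx() further down;
# 'example-operation' is an assumed transaction name.
with repo.lock():
    tr = repo.transaction('example-operation')
    try:
        # ... mutate the store through tr here ...
        tr.close()       # commit the transaction
    finally:
        tr.release()     # no-op after close(); aborts on error paths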
1093 def _journalfiles(self):
1094 def _journalfiles(self):
1094 return ((self.svfs, 'journal'),
1095 return ((self.svfs, 'journal'),
1095 (self.vfs, 'journal.dirstate'),
1096 (self.vfs, 'journal.dirstate'),
1096 (self.vfs, 'journal.branch'),
1097 (self.vfs, 'journal.branch'),
1097 (self.vfs, 'journal.desc'),
1098 (self.vfs, 'journal.desc'),
1098 (self.vfs, 'journal.bookmarks'),
1099 (self.vfs, 'journal.bookmarks'),
1099 (self.svfs, 'journal.phaseroots'))
1100 (self.svfs, 'journal.phaseroots'))
1100
1101
1101 def undofiles(self):
1102 def undofiles(self):
1102 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1103 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1103
1104
1104 def _writejournal(self, desc):
1105 def _writejournal(self, desc):
1105 self.vfs.write("journal.dirstate",
1106 self.vfs.write("journal.dirstate",
1106 self.vfs.tryread("dirstate"))
1107 self.vfs.tryread("dirstate"))
1107 self.vfs.write("journal.branch",
1108 self.vfs.write("journal.branch",
1108 encoding.fromlocal(self.dirstate.branch()))
1109 encoding.fromlocal(self.dirstate.branch()))
1109 self.vfs.write("journal.desc",
1110 self.vfs.write("journal.desc",
1110 "%d\n%s\n" % (len(self), desc))
1111 "%d\n%s\n" % (len(self), desc))
1111 self.vfs.write("journal.bookmarks",
1112 self.vfs.write("journal.bookmarks",
1112 self.vfs.tryread("bookmarks"))
1113 self.vfs.tryread("bookmarks"))
1113 self.svfs.write("journal.phaseroots",
1114 self.svfs.write("journal.phaseroots",
1114 self.svfs.tryread("phaseroots"))
1115 self.svfs.tryread("phaseroots"))
1115
1116
1116 def recover(self):
1117 def recover(self):
1117 with self.lock():
1118 with self.lock():
1118 if self.svfs.exists("journal"):
1119 if self.svfs.exists("journal"):
1119 self.ui.status(_("rolling back interrupted transaction\n"))
1120 self.ui.status(_("rolling back interrupted transaction\n"))
1120 vfsmap = {'': self.svfs,
1121 vfsmap = {'': self.svfs,
1121 'plain': self.vfs,}
1122 'plain': self.vfs,}
1122 transaction.rollback(self.svfs, vfsmap, "journal",
1123 transaction.rollback(self.svfs, vfsmap, "journal",
1123 self.ui.warn)
1124 self.ui.warn)
1124 self.invalidate()
1125 self.invalidate()
1125 return True
1126 return True
1126 else:
1127 else:
1127 self.ui.warn(_("no interrupted transaction available\n"))
1128 self.ui.warn(_("no interrupted transaction available\n"))
1128 return False
1129 return False
1129
1130
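# Editor's sketch (not part of localrepo.py): a leftover 'journal' file in
# the store is what signals an abandoned transaction; recover() rolls it
# back under the store lock and returns True if anything was recovered.
if repo.svfs.exists('journal'):
    repo.recover()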
1130 def rollback(self, dryrun=False, force=False):
1131 def rollback(self, dryrun=False, force=False):
1131 wlock = lock = dsguard = None
1132 wlock = lock = dsguard = None
1132 try:
1133 try:
1133 wlock = self.wlock()
1134 wlock = self.wlock()
1134 lock = self.lock()
1135 lock = self.lock()
1135 if self.svfs.exists("undo"):
1136 if self.svfs.exists("undo"):
1136 dsguard = cmdutil.dirstateguard(self, 'rollback')
1137 dsguard = cmdutil.dirstateguard(self, 'rollback')
1137
1138
1138 return self._rollback(dryrun, force, dsguard)
1139 return self._rollback(dryrun, force, dsguard)
1139 else:
1140 else:
1140 self.ui.warn(_("no rollback information available\n"))
1141 self.ui.warn(_("no rollback information available\n"))
1141 return 1
1142 return 1
1142 finally:
1143 finally:
1143 release(dsguard, lock, wlock)
1144 release(dsguard, lock, wlock)
1144
1145
1145 @unfilteredmethod # Until we get smarter cache management
1146 @unfilteredmethod # Until we get smarter cache management
1146 def _rollback(self, dryrun, force, dsguard):
1147 def _rollback(self, dryrun, force, dsguard):
1147 ui = self.ui
1148 ui = self.ui
1148 try:
1149 try:
1149 args = self.vfs.read('undo.desc').splitlines()
1150 args = self.vfs.read('undo.desc').splitlines()
1150 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1151 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1151 if len(args) >= 3:
1152 if len(args) >= 3:
1152 detail = args[2]
1153 detail = args[2]
1153 oldtip = oldlen - 1
1154 oldtip = oldlen - 1
1154
1155
1155 if detail and ui.verbose:
1156 if detail and ui.verbose:
1156 msg = (_('repository tip rolled back to revision %s'
1157 msg = (_('repository tip rolled back to revision %s'
1157 ' (undo %s: %s)\n')
1158 ' (undo %s: %s)\n')
1158 % (oldtip, desc, detail))
1159 % (oldtip, desc, detail))
1159 else:
1160 else:
1160 msg = (_('repository tip rolled back to revision %s'
1161 msg = (_('repository tip rolled back to revision %s'
1161 ' (undo %s)\n')
1162 ' (undo %s)\n')
1162 % (oldtip, desc))
1163 % (oldtip, desc))
1163 except IOError:
1164 except IOError:
1164 msg = _('rolling back unknown transaction\n')
1165 msg = _('rolling back unknown transaction\n')
1165 desc = None
1166 desc = None
1166
1167
1167 if not force and self['.'] != self['tip'] and desc == 'commit':
1168 if not force and self['.'] != self['tip'] and desc == 'commit':
1168 raise error.Abort(
1169 raise error.Abort(
1169 _('rollback of last commit while not checked out '
1170 _('rollback of last commit while not checked out '
1170 'may lose data'), hint=_('use -f to force'))
1171 'may lose data'), hint=_('use -f to force'))
1171
1172
1172 ui.status(msg)
1173 ui.status(msg)
1173 if dryrun:
1174 if dryrun:
1174 return 0
1175 return 0
1175
1176
1176 parents = self.dirstate.parents()
1177 parents = self.dirstate.parents()
1177 self.destroying()
1178 self.destroying()
1178 vfsmap = {'plain': self.vfs, '': self.svfs}
1179 vfsmap = {'plain': self.vfs, '': self.svfs}
1179 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1180 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1180 if self.vfs.exists('undo.bookmarks'):
1181 if self.vfs.exists('undo.bookmarks'):
1181 self.vfs.rename('undo.bookmarks', 'bookmarks')
1182 self.vfs.rename('undo.bookmarks', 'bookmarks')
1182 if self.svfs.exists('undo.phaseroots'):
1183 if self.svfs.exists('undo.phaseroots'):
1183 self.svfs.rename('undo.phaseroots', 'phaseroots')
1184 self.svfs.rename('undo.phaseroots', 'phaseroots')
1184 self.invalidate()
1185 self.invalidate()
1185
1186
1186 parentgone = (parents[0] not in self.changelog.nodemap or
1187 parentgone = (parents[0] not in self.changelog.nodemap or
1187 parents[1] not in self.changelog.nodemap)
1188 parents[1] not in self.changelog.nodemap)
1188 if parentgone:
1189 if parentgone:
1189 # prevent dirstateguard from overwriting already restored one
1190 # prevent dirstateguard from overwriting already restored one
1190 dsguard.close()
1191 dsguard.close()
1191
1192
1192 self.vfs.rename('undo.dirstate', 'dirstate')
1193 self.vfs.rename('undo.dirstate', 'dirstate')
1193 try:
1194 try:
1194 branch = self.vfs.read('undo.branch')
1195 branch = self.vfs.read('undo.branch')
1195 self.dirstate.setbranch(encoding.tolocal(branch))
1196 self.dirstate.setbranch(encoding.tolocal(branch))
1196 except IOError:
1197 except IOError:
1197 ui.warn(_('named branch could not be reset: '
1198 ui.warn(_('named branch could not be reset: '
1198 'current branch is still \'%s\'\n')
1199 'current branch is still \'%s\'\n')
1199 % self.dirstate.branch())
1200 % self.dirstate.branch())
1200
1201
1201 self.dirstate.invalidate()
1202 self.dirstate.invalidate()
1202 parents = tuple([p.rev() for p in self[None].parents()])
1203 parents = tuple([p.rev() for p in self[None].parents()])
1203 if len(parents) > 1:
1204 if len(parents) > 1:
1204 ui.status(_('working directory now based on '
1205 ui.status(_('working directory now based on '
1205 'revisions %d and %d\n') % parents)
1206 'revisions %d and %d\n') % parents)
1206 else:
1207 else:
1207 ui.status(_('working directory now based on '
1208 ui.status(_('working directory now based on '
1208 'revision %d\n') % parents)
1209 'revision %d\n') % parents)
1209 mergemod.mergestate.clean(self, self['.'].node())
1210 mergemod.mergestate.clean(self, self['.'].node())
1210
1211
1211 # TODO: if we know which new heads may result from this rollback, pass
1212 # TODO: if we know which new heads may result from this rollback, pass
1212 # them to destroy(), which will prevent the branchhead cache from being
1213 # them to destroy(), which will prevent the branchhead cache from being
1213 # invalidated.
1214 # invalidated.
1214 self.destroyed()
1215 self.destroyed()
1215 return 0
1216 return 0
1216
1217
1217 def invalidatecaches(self):
1218 def invalidatecaches(self):
1218
1219
1219 if '_tagscache' in vars(self):
1220 if '_tagscache' in vars(self):
1220 # can't use delattr on proxy
1221 # can't use delattr on proxy
1221 del self.__dict__['_tagscache']
1222 del self.__dict__['_tagscache']
1222
1223
1223 self.unfiltered()._branchcaches.clear()
1224 self.unfiltered()._branchcaches.clear()
1224 self.invalidatevolatilesets()
1225 self.invalidatevolatilesets()
1225
1226
1226 def invalidatevolatilesets(self):
1227 def invalidatevolatilesets(self):
1227 self.filteredrevcache.clear()
1228 self.filteredrevcache.clear()
1228 obsolete.clearobscaches(self)
1229 obsolete.clearobscaches(self)
1229
1230
1230 def invalidatedirstate(self):
1231 def invalidatedirstate(self):
1231 '''Invalidates the dirstate, causing the next call to dirstate
1232 '''Invalidates the dirstate, causing the next call to dirstate
1232 to check if it was modified since the last time it was read,
1233 to check if it was modified since the last time it was read,
1233 rereading it if it has.
1234 rereading it if it has.
1234
1235
1235 This is different from dirstate.invalidate() in that it doesn't
1236 This is different from dirstate.invalidate() in that it doesn't
1236 always reread the dirstate. Use dirstate.invalidate() if you want to
1237 always reread the dirstate. Use dirstate.invalidate() if you want to
1237 explicitly read the dirstate again (i.e. restoring it to a previous
1238 explicitly read the dirstate again (i.e. restoring it to a previous
1238 known good state).'''
1239 known good state).'''
1239 if hasunfilteredcache(self, 'dirstate'):
1240 if hasunfilteredcache(self, 'dirstate'):
1240 for k in self.dirstate._filecache:
1241 for k in self.dirstate._filecache:
1241 try:
1242 try:
1242 delattr(self.dirstate, k)
1243 delattr(self.dirstate, k)
1243 except AttributeError:
1244 except AttributeError:
1244 pass
1245 pass
1245 delattr(self.unfiltered(), 'dirstate')
1246 delattr(self.unfiltered(), 'dirstate')
1246
1247
1247 def invalidate(self, clearfilecache=False):
1248 def invalidate(self, clearfilecache=False):
1248 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1249 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1249 for k in self._filecache.keys():
1250 for k in self._filecache.keys():
1250 # dirstate is invalidated separately in invalidatedirstate()
1251 # dirstate is invalidated separately in invalidatedirstate()
1251 if k == 'dirstate':
1252 if k == 'dirstate':
1252 continue
1253 continue
1253
1254
1254 if clearfilecache:
1255 if clearfilecache:
1255 del self._filecache[k]
1256 del self._filecache[k]
1256 try:
1257 try:
1257 delattr(unfiltered, k)
1258 delattr(unfiltered, k)
1258 except AttributeError:
1259 except AttributeError:
1259 pass
1260 pass
1260 self.invalidatecaches()
1261 self.invalidatecaches()
1261 self.store.invalidatecaches()
1262 self.store.invalidatecaches()
1262
1263
1263 def invalidateall(self):
1264 def invalidateall(self):
1264 '''Fully invalidates both store and non-store parts, causing the
1265 '''Fully invalidates both store and non-store parts, causing the
1265 subsequent operation to reread any outside changes.'''
1266 subsequent operation to reread any outside changes.'''
1266 # extension should hook this to invalidate its caches
1267 # extension should hook this to invalidate its caches
1267 self.invalidate()
1268 self.invalidate()
1268 self.invalidatedirstate()
1269 self.invalidatedirstate()
1269
1270
1270 def _refreshfilecachestats(self, tr):
1271 def _refreshfilecachestats(self, tr):
1271 """Reload stats of cached files so that they are flagged as valid"""
1272 """Reload stats of cached files so that they are flagged as valid"""
1272 for k, ce in self._filecache.items():
1273 for k, ce in self._filecache.items():
1273 if k == 'dirstate' or k not in self.__dict__:
1274 if k == 'dirstate' or k not in self.__dict__:
1274 continue
1275 continue
1275 ce.refresh()
1276 ce.refresh()
1276
1277
1277 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1278 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1278 inheritchecker=None, parentenvvar=None):
1279 inheritchecker=None, parentenvvar=None):
1279 parentlock = None
1280 parentlock = None
1280 # the contents of parentenvvar are used by the underlying lock to
1281 # the contents of parentenvvar are used by the underlying lock to
1281 # determine whether it can be inherited
1282 # determine whether it can be inherited
1282 if parentenvvar is not None:
1283 if parentenvvar is not None:
1283 parentlock = os.environ.get(parentenvvar)
1284 parentlock = os.environ.get(parentenvvar)
1284 try:
1285 try:
1285 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1286 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1286 acquirefn=acquirefn, desc=desc,
1287 acquirefn=acquirefn, desc=desc,
1287 inheritchecker=inheritchecker,
1288 inheritchecker=inheritchecker,
1288 parentlock=parentlock)
1289 parentlock=parentlock)
1289 except error.LockHeld as inst:
1290 except error.LockHeld as inst:
1290 if not wait:
1291 if not wait:
1291 raise
1292 raise
1292 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1293 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1293 (desc, inst.locker))
1294 (desc, inst.locker))
1294 # default to 600 seconds timeout
1295 # default to 600 seconds timeout
1295 l = lockmod.lock(vfs, lockname,
1296 l = lockmod.lock(vfs, lockname,
1296 int(self.ui.config("ui", "timeout", "600")),
1297 int(self.ui.config("ui", "timeout", "600")),
1297 releasefn=releasefn, acquirefn=acquirefn,
1298 releasefn=releasefn, acquirefn=acquirefn,
1298 desc=desc)
1299 desc=desc)
1299 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1300 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1300 return l
1301 return l
1301
1302
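# Editor's sketch (not part of localrepo.py): the waiting path above retries
# with a timeout read from ui.timeout (in seconds, defaulting to 600).
# Scripts that prefer to fail fast can lower it; the source tag is arbitrary.
repo.ui.setconfig('ui', 'timeout', '30', 'my-script')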
1302 def _afterlock(self, callback):
1303 def _afterlock(self, callback):
1303 """add a callback to be run when the repository is fully unlocked
1304 """add a callback to be run when the repository is fully unlocked
1304
1305
1305 The callback will be executed when the outermost lock is released
1306 The callback will be executed when the outermost lock is released
1306 (with wlock being higher level than 'lock')."""
1307 (with wlock being higher level than 'lock')."""
1307 for ref in (self._wlockref, self._lockref):
1308 for ref in (self._wlockref, self._lockref):
1308 l = ref and ref()
1309 l = ref and ref()
1309 if l and l.held:
1310 if l and l.held:
1310 l.postrelease.append(callback)
1311 l.postrelease.append(callback)
1311 break
1312 break
1312 else: # no lock has been found.
1313 else: # no lock has been found.
1313 callback()
1314 callback()
1314
1315
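# Editor's sketch (not part of localrepo.py): deferring work until all locks
# are dropped, the same way txnclosehook() above schedules the 'txnclose'
# hook. Post-release callbacks are invoked with no arguments; 'notify' is a
# hypothetical example.
def notify():
    repo.ui.status('locks released; state is flushed to disk\n')

repo._afterlock(notify)  # runs immediately if no lock is currently held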
1315 def lock(self, wait=True):
1316 def lock(self, wait=True):
1316 '''Lock the repository store (.hg/store) and return a weak reference
1317 '''Lock the repository store (.hg/store) and return a weak reference
1317 to the lock. Use this before modifying the store (e.g. committing or
1318 to the lock. Use this before modifying the store (e.g. committing or
1318 stripping). If you are opening a transaction, get a lock as well.
1319 stripping). If you are opening a transaction, get a lock as well.
1319
1320
1320 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1321 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1321 'wlock' first to avoid a dead-lock hazard.'''
1322 'wlock' first to avoid a dead-lock hazard.'''
1322 l = self._lockref and self._lockref()
1323 l = self._lockref and self._lockref()
1323 if l is not None and l.held:
1324 if l is not None and l.held:
1324 l.lock()
1325 l.lock()
1325 return l
1326 return l
1326
1327
1327 l = self._lock(self.svfs, "lock", wait, None,
1328 l = self._lock(self.svfs, "lock", wait, None,
1328 self.invalidate, _('repository %s') % self.origroot)
1329 self.invalidate, _('repository %s') % self.origroot)
1329 self._lockref = weakref.ref(l)
1330 self._lockref = weakref.ref(l)
1330 return l
1331 return l
1331
1332
1332 def _wlockchecktransaction(self):
1333 def _wlockchecktransaction(self):
1333 if self.currenttransaction() is not None:
1334 if self.currenttransaction() is not None:
1334 raise error.LockInheritanceContractViolation(
1335 raise error.LockInheritanceContractViolation(
1335 'wlock cannot be inherited in the middle of a transaction')
1336 'wlock cannot be inherited in the middle of a transaction')
1336
1337
1337 def wlock(self, wait=True):
1338 def wlock(self, wait=True):
1338 '''Lock the non-store parts of the repository (everything under
1339 '''Lock the non-store parts of the repository (everything under
1339 .hg except .hg/store) and return a weak reference to the lock.
1340 .hg except .hg/store) and return a weak reference to the lock.
1340
1341
1341 Use this before modifying files in .hg.
1342 Use this before modifying files in .hg.
1342
1343
1343 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1344 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1344 'wlock' first to avoid a dead-lock hazard.'''
1345 'wlock' first to avoid a dead-lock hazard.'''
1345 l = self._wlockref and self._wlockref()
1346 l = self._wlockref and self._wlockref()
1346 if l is not None and l.held:
1347 if l is not None and l.held:
1347 l.lock()
1348 l.lock()
1348 return l
1349 return l
1349
1350
1350 # We do not need to check for non-waiting lock acquisition. Such
1351 # We do not need to check for non-waiting lock acquisition. Such
1351 # acquisition would not cause a dead-lock; it would just fail.
1352 # acquisition would not cause a dead-lock; it would just fail.
1352 if wait and (self.ui.configbool('devel', 'all-warnings')
1353 if wait and (self.ui.configbool('devel', 'all-warnings')
1353 or self.ui.configbool('devel', 'check-locks')):
1354 or self.ui.configbool('devel', 'check-locks')):
1354 l = self._lockref and self._lockref()
1355 l = self._lockref and self._lockref()
1355 if l is not None and l.held:
1356 if l is not None and l.held:
1356 self.ui.develwarn('"wlock" acquired after "lock"')
1357 self.ui.develwarn('"wlock" acquired after "lock"')
1357
1358
1358 def unlock():
1359 def unlock():
1359 if self.dirstate.pendingparentchange():
1360 if self.dirstate.pendingparentchange():
1360 self.dirstate.invalidate()
1361 self.dirstate.invalidate()
1361 else:
1362 else:
1362 self.dirstate.write(None)
1363 self.dirstate.write(None)
1363
1364
1364 self._filecache['dirstate'].refresh()
1365 self._filecache['dirstate'].refresh()
1365
1366
1366 l = self._lock(self.vfs, "wlock", wait, unlock,
1367 l = self._lock(self.vfs, "wlock", wait, unlock,
1367 self.invalidatedirstate, _('working directory of %s') %
1368 self.invalidatedirstate, _('working directory of %s') %
1368 self.origroot,
1369 self.origroot,
1369 inheritchecker=self._wlockchecktransaction,
1370 inheritchecker=self._wlockchecktransaction,
1370 parentenvvar='HG_WLOCK_LOCKER')
1371 parentenvvar='HG_WLOCK_LOCKER')
1371 self._wlockref = weakref.ref(l)
1372 self._wlockref = weakref.ref(l)
1372 return l
1373 return l
1373
1374
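# Editor's sketch (not part of localrepo.py): always take wlock before lock,
# as both docstrings require; devel.check-locks flags the reverse order.
# Both locks work as context managers (compare recover() above).
with repo.wlock(), repo.lock():
    pass  # safe to modify .hg/ and .hg/store here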
1374 def _currentlock(self, lockref):
1375 def _currentlock(self, lockref):
1375 """Returns the lock if it's held, or None if it's not."""
1376 """Returns the lock if it's held, or None if it's not."""
1376 if lockref is None:
1377 if lockref is None:
1377 return None
1378 return None
1378 l = lockref()
1379 l = lockref()
1379 if l is None or not l.held:
1380 if l is None or not l.held:
1380 return None
1381 return None
1381 return l
1382 return l
1382
1383
1383 def currentwlock(self):
1384 def currentwlock(self):
1384 """Returns the wlock if it's held, or None if it's not."""
1385 """Returns the wlock if it's held, or None if it's not."""
1385 return self._currentlock(self._wlockref)
1386 return self._currentlock(self._wlockref)
1386
1387
1387 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1388 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1388 """
1389 """
1389 commit an individual file as part of a larger transaction
1390 commit an individual file as part of a larger transaction
1390 """
1391 """
1391
1392
1392 fname = fctx.path()
1393 fname = fctx.path()
1393 fparent1 = manifest1.get(fname, nullid)
1394 fparent1 = manifest1.get(fname, nullid)
1394 fparent2 = manifest2.get(fname, nullid)
1395 fparent2 = manifest2.get(fname, nullid)
1395 if isinstance(fctx, context.filectx):
1396 if isinstance(fctx, context.filectx):
1396 node = fctx.filenode()
1397 node = fctx.filenode()
1397 if node in [fparent1, fparent2]:
1398 if node in [fparent1, fparent2]:
1398 self.ui.debug('reusing %s filelog entry\n' % fname)
1399 self.ui.debug('reusing %s filelog entry\n' % fname)
1399 return node
1400 return node
1400
1401
1401 flog = self.file(fname)
1402 flog = self.file(fname)
1402 meta = {}
1403 meta = {}
1403 copy = fctx.renamed()
1404 copy = fctx.renamed()
1404 if copy and copy[0] != fname:
1405 if copy and copy[0] != fname:
1405 # Mark the new revision of this file as a copy of another
1406 # Mark the new revision of this file as a copy of another
1406 # file. This copy data will effectively act as a parent
1407 # file. This copy data will effectively act as a parent
1407 # of this new revision. If this is a merge, the first
1408 # of this new revision. If this is a merge, the first
1408 # parent will be the nullid (meaning "look up the copy data")
1409 # parent will be the nullid (meaning "look up the copy data")
1409 # and the second one will be the other parent. For example:
1410 # and the second one will be the other parent. For example:
1410 #
1411 #
1411 # 0 --- 1 --- 3 rev1 changes file foo
1412 # 0 --- 1 --- 3 rev1 changes file foo
1412 # \ / rev2 renames foo to bar and changes it
1413 # \ / rev2 renames foo to bar and changes it
1413 # \- 2 -/ rev3 should have bar with all changes and
1414 # \- 2 -/ rev3 should have bar with all changes and
1414 # should record that bar descends from
1415 # should record that bar descends from
1415 # bar in rev2 and foo in rev1
1416 # bar in rev2 and foo in rev1
1416 #
1417 #
1417 # this allows this merge to succeed:
1418 # this allows this merge to succeed:
1418 #
1419 #
1419 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1420 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1420 # \ / merging rev3 and rev4 should use bar@rev2
1421 # \ / merging rev3 and rev4 should use bar@rev2
1421 # \- 2 --- 4 as the merge base
1422 # \- 2 --- 4 as the merge base
1422 #
1423 #
1423
1424
1424 cfname = copy[0]
1425 cfname = copy[0]
1425 crev = manifest1.get(cfname)
1426 crev = manifest1.get(cfname)
1426 newfparent = fparent2
1427 newfparent = fparent2
1427
1428
1428 if manifest2: # branch merge
1429 if manifest2: # branch merge
1429 if fparent2 == nullid or crev is None: # copied on remote side
1430 if fparent2 == nullid or crev is None: # copied on remote side
1430 if cfname in manifest2:
1431 if cfname in manifest2:
1431 crev = manifest2[cfname]
1432 crev = manifest2[cfname]
1432 newfparent = fparent1
1433 newfparent = fparent1
1433
1434
1434 # Here, we used to search backwards through history to try to find
1435 # Here, we used to search backwards through history to try to find
1435 # where the file copy came from if the source of a copy was not in
1436 # where the file copy came from if the source of a copy was not in
1436 # the parent directory. However, this doesn't actually make sense to
1437 # the parent directory. However, this doesn't actually make sense to
1437 # do (what does a copy from something not in your working copy even
1438 # do (what does a copy from something not in your working copy even
1438 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1439 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1439 # the user that copy information was dropped, so if they didn't
1440 # the user that copy information was dropped, so if they didn't
1440 # expect this outcome it can be fixed, but this is the correct
1441 # expect this outcome it can be fixed, but this is the correct
1441 # behavior in this circumstance.
1442 # behavior in this circumstance.
1442
1443
1443 if crev:
1444 if crev:
1444 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1445 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1445 meta["copy"] = cfname
1446 meta["copy"] = cfname
1446 meta["copyrev"] = hex(crev)
1447 meta["copyrev"] = hex(crev)
1447 fparent1, fparent2 = nullid, newfparent
1448 fparent1, fparent2 = nullid, newfparent
1448 else:
1449 else:
1449 self.ui.warn(_("warning: can't find ancestor for '%s' "
1450 self.ui.warn(_("warning: can't find ancestor for '%s' "
1450 "copied from '%s'!\n") % (fname, cfname))
1451 "copied from '%s'!\n") % (fname, cfname))
1451
1452
1452 elif fparent1 == nullid:
1453 elif fparent1 == nullid:
1453 fparent1, fparent2 = fparent2, nullid
1454 fparent1, fparent2 = fparent2, nullid
1454 elif fparent2 != nullid:
1455 elif fparent2 != nullid:
1455 # is one parent an ancestor of the other?
1456 # is one parent an ancestor of the other?
1456 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1457 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1457 if fparent1 in fparentancestors:
1458 if fparent1 in fparentancestors:
1458 fparent1, fparent2 = fparent2, nullid
1459 fparent1, fparent2 = fparent2, nullid
1459 elif fparent2 in fparentancestors:
1460 elif fparent2 in fparentancestors:
1460 fparent2 = nullid
1461 fparent2 = nullid
1461
1462
1462 # is the file changed?
1463 # is the file changed?
1463 text = fctx.data()
1464 text = fctx.data()
1464 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1465 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1465 changelist.append(fname)
1466 changelist.append(fname)
1466 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1467 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1467 # are just the flags changed during merge?
1468 # are just the flags changed during merge?
1468 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1469 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1469 changelist.append(fname)
1470 changelist.append(fname)
1470
1471
1471 return fparent1
1472 return fparent1
1472
1473
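# Editor's sketch (not part of localrepo.py): the copy metadata written by
# _filecommit() can be read back through filectx.renamed(), which returns
# (source path, source filenode) for a copy/rename and a false value
# otherwise. 'tip' and 'bar' are assumptions.
fctx = repo['tip']['bar']
copied = fctx.renamed()
if copied:
    srcpath, srcnode = copied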
1473 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1474 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1474 """check for commit arguments that aren't commitable"""
1475 """check for commit arguments that aren't commitable"""
1475 if match.isexact() or match.prefix():
1476 if match.isexact() or match.prefix():
1476 matched = set(status.modified + status.added + status.removed)
1477 matched = set(status.modified + status.added + status.removed)
1477
1478
1478 for f in match.files():
1479 for f in match.files():
1479 f = self.dirstate.normalize(f)
1480 f = self.dirstate.normalize(f)
1480 if f == '.' or f in matched or f in wctx.substate:
1481 if f == '.' or f in matched or f in wctx.substate:
1481 continue
1482 continue
1482 if f in status.deleted:
1483 if f in status.deleted:
1483 fail(f, _('file not found!'))
1484 fail(f, _('file not found!'))
1484 if f in vdirs: # visited directory
1485 if f in vdirs: # visited directory
1485 d = f + '/'
1486 d = f + '/'
1486 for mf in matched:
1487 for mf in matched:
1487 if mf.startswith(d):
1488 if mf.startswith(d):
1488 break
1489 break
1489 else:
1490 else:
1490 fail(f, _("no match under directory!"))
1491 fail(f, _("no match under directory!"))
1491 elif f not in self.dirstate:
1492 elif f not in self.dirstate:
1492 fail(f, _("file not tracked!"))
1493 fail(f, _("file not tracked!"))
1493
1494
1494 @unfilteredmethod
1495 @unfilteredmethod
1495 def commit(self, text="", user=None, date=None, match=None, force=False,
1496 def commit(self, text="", user=None, date=None, match=None, force=False,
1496 editor=False, extra=None):
1497 editor=False, extra=None):
1497 """Add a new revision to current repository.
1498 """Add a new revision to current repository.
1498
1499
1499 Revision information is gathered from the working directory,
1500 Revision information is gathered from the working directory,
1500 match can be used to filter the committed files. If editor is
1501 match can be used to filter the committed files. If editor is
1501 supplied, it is called to get a commit message.
1502 supplied, it is called to get a commit message.
1502 """
1503 """
1503 if extra is None:
1504 if extra is None:
1504 extra = {}
1505 extra = {}
1505
1506
1506 def fail(f, msg):
1507 def fail(f, msg):
1507 raise error.Abort('%s: %s' % (f, msg))
1508 raise error.Abort('%s: %s' % (f, msg))
1508
1509
1509 if not match:
1510 if not match:
1510 match = matchmod.always(self.root, '')
1511 match = matchmod.always(self.root, '')
1511
1512
1512 if not force:
1513 if not force:
1513 vdirs = []
1514 vdirs = []
1514 match.explicitdir = vdirs.append
1515 match.explicitdir = vdirs.append
1515 match.bad = fail
1516 match.bad = fail
1516
1517
1517 wlock = lock = tr = None
1518 wlock = lock = tr = None
1518 try:
1519 try:
1519 wlock = self.wlock()
1520 wlock = self.wlock()
1520 lock = self.lock() # for recent changelog (see issue4368)
1521 lock = self.lock() # for recent changelog (see issue4368)
1521
1522
1522 wctx = self[None]
1523 wctx = self[None]
1523 merge = len(wctx.parents()) > 1
1524 merge = len(wctx.parents()) > 1
1524
1525
1525 if not force and merge and match.ispartial():
1526 if not force and merge and match.ispartial():
1526 raise error.Abort(_('cannot partially commit a merge '
1527 raise error.Abort(_('cannot partially commit a merge '
1527 '(do not specify files or patterns)'))
1528 '(do not specify files or patterns)'))
1528
1529
1529 status = self.status(match=match, clean=force)
1530 status = self.status(match=match, clean=force)
1530 if force:
1531 if force:
1531 status.modified.extend(status.clean) # mq may commit clean files
1532 status.modified.extend(status.clean) # mq may commit clean files
1532
1533
1533 # check subrepos
1534 # check subrepos
1534 subs = []
1535 subs = []
1535 commitsubs = set()
1536 commitsubs = set()
1536 newstate = wctx.substate.copy()
1537 newstate = wctx.substate.copy()
1537 # only manage subrepos and .hgsubstate if .hgsub is present
1538 # only manage subrepos and .hgsubstate if .hgsub is present
1538 if '.hgsub' in wctx:
1539 if '.hgsub' in wctx:
1539 # we'll decide whether to track this ourselves, thanks
1540 # we'll decide whether to track this ourselves, thanks
1540 for c in status.modified, status.added, status.removed:
1541 for c in status.modified, status.added, status.removed:
1541 if '.hgsubstate' in c:
1542 if '.hgsubstate' in c:
1542 c.remove('.hgsubstate')
1543 c.remove('.hgsubstate')
1543
1544
1544 # compare current state to last committed state
1545 # compare current state to last committed state
1545 # build new substate based on last committed state
1546 # build new substate based on last committed state
1546 oldstate = wctx.p1().substate
1547 oldstate = wctx.p1().substate
1547 for s in sorted(newstate.keys()):
1548 for s in sorted(newstate.keys()):
1548 if not match(s):
1549 if not match(s):
1549 # ignore working copy, use old state if present
1550 # ignore working copy, use old state if present
1550 if s in oldstate:
1551 if s in oldstate:
1551 newstate[s] = oldstate[s]
1552 newstate[s] = oldstate[s]
1552 continue
1553 continue
1553 if not force:
1554 if not force:
1554 raise error.Abort(
1555 raise error.Abort(
1555 _("commit with new subrepo %s excluded") % s)
1556 _("commit with new subrepo %s excluded") % s)
1556 dirtyreason = wctx.sub(s).dirtyreason(True)
1557 dirtyreason = wctx.sub(s).dirtyreason(True)
1557 if dirtyreason:
1558 if dirtyreason:
1558 if not self.ui.configbool('ui', 'commitsubrepos'):
1559 if not self.ui.configbool('ui', 'commitsubrepos'):
1559 raise error.Abort(dirtyreason,
1560 raise error.Abort(dirtyreason,
1560 hint=_("use --subrepos for recursive commit"))
1561 hint=_("use --subrepos for recursive commit"))
1561 subs.append(s)
1562 subs.append(s)
1562 commitsubs.add(s)
1563 commitsubs.add(s)
1563 else:
1564 else:
1564 bs = wctx.sub(s).basestate()
1565 bs = wctx.sub(s).basestate()
1565 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1566 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1566 if oldstate.get(s, (None, None, None))[1] != bs:
1567 if oldstate.get(s, (None, None, None))[1] != bs:
1567 subs.append(s)
1568 subs.append(s)
1568
1569
1569 # check for removed subrepos
1570 # check for removed subrepos
1570 for p in wctx.parents():
1571 for p in wctx.parents():
1571 r = [s for s in p.substate if s not in newstate]
1572 r = [s for s in p.substate if s not in newstate]
1572 subs += [s for s in r if match(s)]
1573 subs += [s for s in r if match(s)]
1573 if subs:
1574 if subs:
1574 if (not match('.hgsub') and
1575 if (not match('.hgsub') and
1575 '.hgsub' in (wctx.modified() + wctx.added())):
1576 '.hgsub' in (wctx.modified() + wctx.added())):
1576 raise error.Abort(
1577 raise error.Abort(
1577 _("can't commit subrepos without .hgsub"))
1578 _("can't commit subrepos without .hgsub"))
1578 status.modified.insert(0, '.hgsubstate')
1579 status.modified.insert(0, '.hgsubstate')
1579
1580
1580 elif '.hgsub' in status.removed:
1581 elif '.hgsub' in status.removed:
1581 # clean up .hgsubstate when .hgsub is removed
1582 # clean up .hgsubstate when .hgsub is removed
1582 if ('.hgsubstate' in wctx and
1583 if ('.hgsubstate' in wctx and
1583 '.hgsubstate' not in (status.modified + status.added +
1584 '.hgsubstate' not in (status.modified + status.added +
1584 status.removed)):
1585 status.removed)):
1585 status.removed.insert(0, '.hgsubstate')
1586 status.removed.insert(0, '.hgsubstate')
1586
1587
1587 # make sure all explicit patterns are matched
1588 # make sure all explicit patterns are matched
1588 if not force:
1589 if not force:
1589 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1590 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1590
1591
1591 cctx = context.workingcommitctx(self, status,
1592 cctx = context.workingcommitctx(self, status,
1592 text, user, date, extra)
1593 text, user, date, extra)
1593
1594
1594 # internal config: ui.allowemptycommit
1595 # internal config: ui.allowemptycommit
1595 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1596 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1596 or extra.get('close') or merge or cctx.files()
1597 or extra.get('close') or merge or cctx.files()
1597 or self.ui.configbool('ui', 'allowemptycommit'))
1598 or self.ui.configbool('ui', 'allowemptycommit'))
1598 if not allowemptycommit:
1599 if not allowemptycommit:
1599 return None
1600 return None
1600
1601
1601 if merge and cctx.deleted():
1602 if merge and cctx.deleted():
1602 raise error.Abort(_("cannot commit merge with missing files"))
1603 raise error.Abort(_("cannot commit merge with missing files"))
1603
1604
1604 ms = mergemod.mergestate.read(self)
1605 ms = mergemod.mergestate.read(self)
1605
1606
1606 if list(ms.unresolved()):
1607 if list(ms.unresolved()):
1607 raise error.Abort(_('unresolved merge conflicts '
1608 raise error.Abort(_('unresolved merge conflicts '
1608 '(see "hg help resolve")'))
1609 '(see "hg help resolve")'))
1609 if ms.mdstate() != 's' or list(ms.driverresolved()):
1610 if ms.mdstate() != 's' or list(ms.driverresolved()):
1610 raise error.Abort(_('driver-resolved merge conflicts'),
1611 raise error.Abort(_('driver-resolved merge conflicts'),
1611 hint=_('run "hg resolve --all" to resolve'))
1612 hint=_('run "hg resolve --all" to resolve'))
1612
1613
1613 if editor:
1614 if editor:
1614 cctx._text = editor(self, cctx, subs)
1615 cctx._text = editor(self, cctx, subs)
1615 edited = (text != cctx._text)
1616 edited = (text != cctx._text)
1616
1617
1617 # Save commit message in case this transaction gets rolled back
1618 # Save commit message in case this transaction gets rolled back
1618 # (e.g. by a pretxncommit hook). Leave the content alone on
1619 # (e.g. by a pretxncommit hook). Leave the content alone on
1619 # the assumption that the user will use the same editor again.
1620 # the assumption that the user will use the same editor again.
1620 msgfn = self.savecommitmessage(cctx._text)
1621 msgfn = self.savecommitmessage(cctx._text)
1621
1622
1622 # commit subs and write new state
1623 # commit subs and write new state
1623 if subs:
1624 if subs:
1624 for s in sorted(commitsubs):
1625 for s in sorted(commitsubs):
1625 sub = wctx.sub(s)
1626 sub = wctx.sub(s)
1626 self.ui.status(_('committing subrepository %s\n') %
1627 self.ui.status(_('committing subrepository %s\n') %
1627 subrepo.subrelpath(sub))
1628 subrepo.subrelpath(sub))
1628 sr = sub.commit(cctx._text, user, date)
1629 sr = sub.commit(cctx._text, user, date)
1629 newstate[s] = (newstate[s][0], sr)
1630 newstate[s] = (newstate[s][0], sr)
1630 subrepo.writestate(self, newstate)
1631 subrepo.writestate(self, newstate)
1631
1632
1632 p1, p2 = self.dirstate.parents()
1633 p1, p2 = self.dirstate.parents()
1633 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1634 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1634 try:
1635 try:
1635 self.hook("precommit", throw=True, parent1=hookp1,
1636 self.hook("precommit", throw=True, parent1=hookp1,
1636 parent2=hookp2)
1637 parent2=hookp2)
1637 tr = self.transaction('commit')
1638 tr = self.transaction('commit')
1638 ret = self.commitctx(cctx, True)
1639 ret = self.commitctx(cctx, True)
1639 except: # re-raises
1640 except: # re-raises
1640 if edited:
1641 if edited:
1641 self.ui.write(
1642 self.ui.write(
1642 _('note: commit message saved in %s\n') % msgfn)
1643 _('note: commit message saved in %s\n') % msgfn)
1643 raise
1644 raise
1644 # update bookmarks, dirstate and mergestate
1645 # update bookmarks, dirstate and mergestate
1645 bookmarks.update(self, [p1, p2], ret)
1646 bookmarks.update(self, [p1, p2], ret)
1646 cctx.markcommitted(ret)
1647 cctx.markcommitted(ret)
1647 ms.reset()
1648 ms.reset()
1648 tr.close()
1649 tr.close()
1649
1650
1650 finally:
1651 finally:
1651 lockmod.release(tr, lock, wlock)
1652 lockmod.release(tr, lock, wlock)
1652
1653
1653 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1654 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1654 # hack for commands that use a temporary commit (e.g. histedit):
1655 # hack for commands that use a temporary commit (e.g. histedit):
1655 # the temporary commit may have been stripped before the hook runs
1656 # the temporary commit may have been stripped before the hook runs
1656 if self.changelog.hasnode(ret):
1657 if self.changelog.hasnode(ret):
1657 self.hook("commit", node=node, parent1=parent1,
1658 self.hook("commit", node=node, parent1=parent1,
1658 parent2=parent2)
1659 parent2=parent2)
1659 self._afterlock(commithook)
1660 self._afterlock(commithook)
1660 return ret
1661 return ret
1661
1662
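# Editor's sketch (not part of localrepo.py): a minimal commit() call. With
# nothing changed and ui.allowemptycommit unset, it returns None instead of
# creating an empty changeset. The message and user string are assumptions.
node = repo.commit(text='fix typo', user='Alice <alice@example.com>')
if node is None:
    repo.ui.status('nothing to commit\n')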
1662 @unfilteredmethod
1663 @unfilteredmethod
1663 def commitctx(self, ctx, error=False):
1664 def commitctx(self, ctx, error=False):
1664 """Add a new revision to current repository.
1665 """Add a new revision to current repository.
1665 Revision information is passed via the context argument.
1666 Revision information is passed via the context argument.
1666 """
1667 """
1667
1668
1668 tr = None
1669 tr = None
1669 p1, p2 = ctx.p1(), ctx.p2()
1670 p1, p2 = ctx.p1(), ctx.p2()
1670 user = ctx.user()
1671 user = ctx.user()
1671
1672
1672 lock = self.lock()
1673 lock = self.lock()
1673 try:
1674 try:
1674 tr = self.transaction("commit")
1675 tr = self.transaction("commit")
1675 trp = weakref.proxy(tr)
1676 trp = weakref.proxy(tr)
1676
1677
1677 if ctx.files():
1678 if ctx.files():
1678 m1 = p1.manifest()
1679 m1 = p1.manifest()
1679 m2 = p2.manifest()
1680 m2 = p2.manifest()
1680 m = m1.copy()
1681 m = m1.copy()
1681
1682
1682 # check in files
1683 # check in files
1683 added = []
1684 added = []
1684 changed = []
1685 changed = []
1685 removed = list(ctx.removed())
1686 removed = list(ctx.removed())
1686 linkrev = len(self)
1687 linkrev = len(self)
1687 self.ui.note(_("committing files:\n"))
1688 self.ui.note(_("committing files:\n"))
1688 for f in sorted(ctx.modified() + ctx.added()):
1689 for f in sorted(ctx.modified() + ctx.added()):
1689 self.ui.note(f + "\n")
1690 self.ui.note(f + "\n")
1690 try:
1691 try:
1691 fctx = ctx[f]
1692 fctx = ctx[f]
1692 if fctx is None:
1693 if fctx is None:
1693 removed.append(f)
1694 removed.append(f)
1694 else:
1695 else:
1695 added.append(f)
1696 added.append(f)
1696 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1697 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1697 trp, changed)
1698 trp, changed)
1698 m.setflag(f, fctx.flags())
1699 m.setflag(f, fctx.flags())
1699 except OSError as inst:
1700 except OSError as inst:
1700 self.ui.warn(_("trouble committing %s!\n") % f)
1701 self.ui.warn(_("trouble committing %s!\n") % f)
1701 raise
1702 raise
1702 except IOError as inst:
1703 except IOError as inst:
1703 errcode = getattr(inst, 'errno', errno.ENOENT)
1704 errcode = getattr(inst, 'errno', errno.ENOENT)
1704 if error or errcode and errcode != errno.ENOENT:
1705 if error or errcode and errcode != errno.ENOENT:
1705 self.ui.warn(_("trouble committing %s!\n") % f)
1706 self.ui.warn(_("trouble committing %s!\n") % f)
1706 raise
1707 raise
1707
1708
1708 # update manifest
1709 # update manifest
1709 self.ui.note(_("committing manifest\n"))
1710 self.ui.note(_("committing manifest\n"))
1710 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1711 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1711 drop = [f for f in removed if f in m]
1712 drop = [f for f in removed if f in m]
1712 for f in drop:
1713 for f in drop:
1713 del m[f]
1714 del m[f]
1714 mn = self.manifest.add(m, trp, linkrev,
1715 mn = self.manifest.add(m, trp, linkrev,
1715 p1.manifestnode(), p2.manifestnode(),
1716 p1.manifestnode(), p2.manifestnode(),
1716 added, drop)
1717 added, drop)
1717 files = changed + removed
1718 files = changed + removed
1718 else:
1719 else:
1719 mn = p1.manifestnode()
1720 mn = p1.manifestnode()
1720 files = []
1721 files = []
1721
1722
1722 # update changelog
1723 # update changelog
1723 self.ui.note(_("committing changelog\n"))
1724 self.ui.note(_("committing changelog\n"))
1724 self.changelog.delayupdate(tr)
1725 self.changelog.delayupdate(tr)
1725 n = self.changelog.add(mn, files, ctx.description(),
1726 n = self.changelog.add(mn, files, ctx.description(),
1726 trp, p1.node(), p2.node(),
1727 trp, p1.node(), p2.node(),
1727 user, ctx.date(), ctx.extra().copy())
1728 user, ctx.date(), ctx.extra().copy())
1728 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1729 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1729 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1730 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1730 parent2=xp2)
1731 parent2=xp2)
1731 # set the new commit to its proper phase
1732 # set the new commit to its proper phase
1732 targetphase = subrepo.newcommitphase(self.ui, ctx)
1733 targetphase = subrepo.newcommitphase(self.ui, ctx)
1733 if targetphase:
1734 if targetphase:
1734 # retract boundary does not alter parent changesets.
1735 # retract boundary does not alter parent changesets.
1735 # if a parent has a higher phase, the resulting phase will
1736 # if a parent has a higher phase, the resulting phase will
1736 # be compliant anyway
1737 # be compliant anyway
1737 #
1738 #
1738 # if minimal phase was 0 we don't need to retract anything
1739 # if minimal phase was 0 we don't need to retract anything
1739 phases.retractboundary(self, tr, targetphase, [n])
1740 phases.retractboundary(self, tr, targetphase, [n])
1740 tr.close()
1741 tr.close()
1741 branchmap.updatecache(self.filtered('served'))
1742 branchmap.updatecache(self.filtered('served'))
1742 return n
1743 return n
1743 finally:
1744 finally:
1744 if tr:
1745 if tr:
1745 tr.release()
1746 tr.release()
1746 lock.release()
1747 lock.release()
1747
1748
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

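    # between() above samples the first-parent chain at exponentially
    # growing distances: with i counting steps below top and f doubling
    # after each hit, nodes are recorded 1, 2, 4, 8, ... steps down. For a
    # linear history of length 100 a single pair therefore yields only
    # seven nodes, which keeps the reply to old-style discovery requests
    # logarithmic in the length of the chain.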
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

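    # An extension would typically register with this hook point from
    # reposetup(). A sketch (the extension name 'myext' and the predicate
    # forbidden() are illustrative, not real APIs):
    #
    #   def _checkoutgoing(pushop):
    #       # pushop carries .repo, .remote and .outgoing
    #       if forbidden(pushop.outgoing.missing):
    #           raise error.Abort(_('push rejected by myext'))
    #
    #   def reposetup(ui, repo):
    #       if repo.local():
    #           repo.prepushoutgoinghooks.add('myext', _checkoutgoing)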
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

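    # Example call (sketch): moving a bookmark through the generic pushkey
    # interface. For the 'bookmarks' namespace keys are bookmark names and
    # values are hex nodes (an empty string meaning "not set"):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)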
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

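    # savecommitmessage() is what lets a failed commit be retried without
    # retyping the message; the returned path is relative to the repo root,
    # e.g.:
    #
    #   msgpath = repo.savecommitmessage(text)   # -> '.hg/last-message.txt'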
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

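# aftertrans() and undoname() together implement the journal -> undo file
# rotation that makes 'hg rollback' possible; for instance,
# undoname('.hg/store/journal') returns '.hg/store/undo'.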
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
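
Since newreporequirements() is the documented wrapping point, an extension
can inject its own requirement when repositories are created. A minimal
sketch (the extension name 'myext' and the requirement 'exp-myfeature' are
made-up illustrations):

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        requirements = orig(repo)
        if repo.ui.configbool('myext', 'myfeature', False):
            requirements.add('exp-myfeature')
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)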
@@ -1,165 +1,173 b''

  $ cat << EOF > buggylocking.py
  > """A small extension that tests our developer warnings
  > """
  >
  > from mercurial import cmdutil, repair, revset
  >
  > cmdtable = {}
  > command = cmdutil.command(cmdtable)
  >
  > @command('buggylocking', [], '')
  > def buggylocking(ui, repo):
  >     lo = repo.lock()
  >     wl = repo.wlock()
  >     wl.release()
  >     lo.release()
  >
  > @command('buggytransaction', [], '')
  > def buggylocking(ui, repo):
  >     tr = repo.transaction('buggy')
  >     # make sure we roll back the transaction as we don't want to rely on the __del__
  >     tr.release()
  >
  > @command('properlocking', [], '')
  > def properlocking(ui, repo):
  >     """check that reentrance is fine"""
  >     wl = repo.wlock()
  >     lo = repo.lock()
  >     tr = repo.transaction('proper')
  >     tr2 = repo.transaction('proper')
  >     lo2 = repo.lock()
  >     wl2 = repo.wlock()
  >     wl2.release()
  >     lo2.release()
  >     tr2.close()
  >     tr.close()
  >     lo.release()
  >     wl.release()
  >
  > @command('nowaitlocking', [], '')
  > def nowaitlocking(ui, repo):
  >     lo = repo.lock()
  >     wl = repo.wlock(wait=False)
  >     wl.release()
  >     lo.release()
  >
  > @command('stripintr', [], '')
  > def stripintr(ui, repo):
  >     lo = repo.lock()
  >     tr = repo.transaction('foobar')
  >     try:
  >         repair.strip(repo.ui, repo, [repo['.'].node()])
  >     finally:
  >         lo.release()
  > @command('oldanddeprecated', [], '')
  > def oldanddeprecated(ui, repo):
  >     """test deprecation warning API"""
  >     def foobar(ui):
  >         ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
  >     foobar(ui)
  >
  > def oldstylerevset(repo, subset, x):
  >     return list(subset)
  >
  > revset.symbols['oldstyle'] = oldstylerevset
  > EOF

  $ cat << EOF >> $HGRCPATH
  > [extensions]
  > buggylocking=$TESTTMP/buggylocking.py
  > mock=$TESTDIR/mockblackbox.py
  > blackbox=
  > [devel]
  > all-warnings=1
  > EOF

  $ hg init lock-checker
  $ cd lock-checker
  $ hg buggylocking
  devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
  $ cat << EOF >> $HGRCPATH
  > [devel]
  > all=0
  > check-locks=1
  > EOF
  $ hg buggylocking
  devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
  $ hg buggylocking --traceback
  devel-warn: "wlock" acquired after "lock" at:
   */hg:* in * (glob)
   */mercurial/dispatch.py:* in run (glob)
   */mercurial/dispatch.py:* in dispatch (glob)
   */mercurial/dispatch.py:* in _runcatch (glob)
   */mercurial/dispatch.py:* in _dispatch (glob)
   */mercurial/dispatch.py:* in runcommand (glob)
   */mercurial/dispatch.py:* in _runcommand (glob)
   */mercurial/dispatch.py:* in checkargs (glob)
   */mercurial/dispatch.py:* in <lambda> (glob)
   */mercurial/util.py:* in check (glob)
   $TESTTMP/buggylocking.py:* in buggylocking (glob)
  $ hg properlocking
  $ hg nowaitlocking

  $ echo a > a
  $ hg add a
  $ hg commit -m a
  $ hg stripintr
  saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
  abort: programming error: cannot strip from inside a transaction
  (contact your extension maintainer)
  [255]

  $ hg log -r "oldstyle()" -T '{rev}\n'
  devel-warn: revset "oldstyle" uses list instead of smartset
  (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
  0
  $ hg oldanddeprecated
  devel-warn: foorbar is deprecated, go shopping
  (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)

  $ hg oldanddeprecated --traceback
  devel-warn: foorbar is deprecated, go shopping
  (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
   */hg:* in <module> (glob)
   */mercurial/dispatch.py:* in run (glob)
   */mercurial/dispatch.py:* in dispatch (glob)
   */mercurial/dispatch.py:* in _runcatch (glob)
   */mercurial/dispatch.py:* in _dispatch (glob)
   */mercurial/dispatch.py:* in runcommand (glob)
   */mercurial/dispatch.py:* in _runcommand (glob)
   */mercurial/dispatch.py:* in checkargs (glob)
   */mercurial/dispatch.py:* in <lambda> (glob)
   */mercurial/util.py:* in check (glob)
   $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
  $ hg blackbox -l 9
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" uses list instead of smartset
  (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r oldstyle() -T {rev}\n exited 0 after * seconds (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
  (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
  (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
   */hg:* in <module> (glob)
   */mercurial/dispatch.py:* in run (glob)
   */mercurial/dispatch.py:* in dispatch (glob)
   */mercurial/dispatch.py:* in _runcatch (glob)
   */mercurial/dispatch.py:* in _dispatch (glob)
   */mercurial/dispatch.py:* in runcommand (glob)
   */mercurial/dispatch.py:* in _runcommand (glob)
   */mercurial/dispatch.py:* in checkargs (glob)
   */mercurial/dispatch.py:* in <lambda> (glob)
   */mercurial/util.py:* in check (glob)
   $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9

Test programming error failure:

-  $ hg buggytransaction
-  devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
+  $ hg buggytransaction 2>&1 | egrep -v '^ '
+  ** Unknown exception encountered with possibly-broken third-party extension buggylocking
+  ** which supports versions unknown of Mercurial.
+  ** Please disable buggylocking and try your action again.
+  ** If that fixes the bug please report it to the extension author.
+  ** Python * (glob)
+  ** Mercurial Distributed SCM (*) (glob)
+  ** Extensions loaded: * (glob)
+  Traceback (most recent call last):
+  RuntimeError: programming error: transaction requires locking

  $ cd ..
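
A version of the buggy command that satisfies the new hard requirement
would take the lock before opening the transaction; a sketch (the command
name 'fixedtransaction' is illustrative):

  > @command('fixedtransaction', [], '')
  > def fixedtransaction(ui, repo):
  >     lo = repo.lock()
  >     try:
  >         tr = repo.transaction('fixed')
  >         tr.close()
  >         tr.release()
  >     finally:
  >         lo.release()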