lock: show more detail for new-style locks in lock waiting message (issue4752)...
Mark Ignacio
r29883:0c8c388c default
@@ -1,1977 +1,1984 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    context,
    dirstate,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

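# Illustrative note (not part of the original file): the cache helpers above
# store values on the unfiltered repo so that every filtered view shares
# them. Both patterns appear on real methods later in this module, e.g.:
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @unfilteredmethod
#     def _tag(self, names, node, message, local, user, date, ...):
#         ...
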
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

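    # Illustrative sketch (not in the original file; the function names are
    # hypothetical): an extension would typically register a feature setup
    # function so that __init__ below adds its requirement to self.supported:
    #
    #     def featuresetup(ui, supported):
    #         supported |= set(['myfeature'])
    #
    #     def uisetup(ui):
    #         localrepo.localrepository.featuresetupfuncs.add(featuresetup)
    #
    # __init__ only invokes functions whose defining module belongs to an
    # enabled extension.
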
    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

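    # Illustrative hgrc snippet (values made up; only the option names come
    # from _applyopenerreqs above). These experimental [format] options are
    # forwarded to the revlogs through self.svfs.options:
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #     aggressivemergedeltas = True
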
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

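    # Usage sketch (illustrative): filtered() and unfiltered() convert
    # between views of the same repository; the proxy class built above
    # preserves any repo subclass while hiding filtered revisions:
    #
    #     served = repo.filtered('served')   # the view localpeer serves
    #     unfi = served.unfiltered()         # back to the full repository
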
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

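    # Illustrative hgrc snippet for the developer config read above (when
    # unset, the obsstore class default format is used):
    #
    #     [format]
    #     obsstore-version = 1
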
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @property
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

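    # Usage sketch (illustrative): __getitem__ gives dict-like access to
    # changesets:
    #
    #     repo[None]    # workingctx for the working directory
    #     repo['tip']   # changectx resolved via context.changectx
    #     repo[0:3]     # list of changectx, filtered revisions skipped
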
    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

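    # Usage sketch (illustrative): the %-formatting escaped by
    # revset.formatspec keeps arguments safe inside the expression:
    #
    #     repo.revs('heads(%d::)', rev)         # %d: a revision number
    #     repo.revs('branch(%s)', branchname)   # %s: a string
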
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

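    # Usage sketch: _tag below uses this helper to fire the 'pretag' hook,
    # aborting the operation on failure because of throw=True:
    #
    #     self.hook('pretag', throw=True, node=hex(node), tag=name,
    #               local=local)
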
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in a non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

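    # Usage sketch (illustrative; the tag name, message, and user are made
    # up): tagging the current tip with a global tag commits a changeset
    # touching only .hgtags, per the docstring above:
    #
    #     repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0', False,
    #              'alice <alice@example.org>', None)
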
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

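    # Usage sketch (illustrative): the returned branchmap behaves like a
    # mapping from branch name to branch heads; heads are ordered by
    # increasing revision number, so the last one is the newest:
    #
    #     for branch, heads in repo.branchmap().iteritems():
    #         newest = heads[-1]
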
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

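    # Usage sketch (illustrative; the nodes are placeholders): known() backs
    # the wire-protocol command of the same name, mapping nodes to booleans
    # and treating filtered revisions as unknown:
    #
    #     repo.known([somenode, missingnode])   # -> [True, False]
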
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
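
    # Editor's sketch (not part of upstream localrepo.py): how the encode and
    # decode filter pipelines above pair up. wread() pushes working-directory
    # bytes through the [encode] filters on their way into the repository;
    # wwrite() pushes repository bytes through the [decode] filters on their
    # way back out. The file name is an assumption for illustration, e.g.
    # given an hgrc such as:
    #   [encode]
    #   **.gz = pipe: gunzip
    #   [decode]
    #   **.gz = pipe: gzip
    def _filter_roundtrip_sketch(self):
        stored = self.wread('data.gz')        # applies [encode] filters
        self.wwrite('data.gz', stored, '')    # applies [decode] filters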

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
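
    # Editor's sketch (not part of upstream localrepo.py): the locking and
    # transaction nesting the method above enforces. A caller takes wlock,
    # then lock, then opens the transaction; transaction() itself raises
    # under the devel checks if no store lock is held, and nests via
    # tr.nest() when a transaction is already running.
    def _transaction_usage_sketch(self):
        wlock = lock = tr = None
        try:
            wlock = self.wlock()   # non-store lock first, see wlock() below
            lock = self.lock()     # store lock second, avoiding deadlocks
            tr = self.transaction('editor-sketch')
            # ... mutate the store here ...
            tr.close()             # runs pretxnclose, then the finalizers
        finally:
            release(tr, lock, wlock)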

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
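
    # Editor's sketch (not part of upstream localrepo.py): the two locker
    # string shapes the LockHeld branch above distinguishes (issue4752).
    # New-style lock files record "host:pid", so the waiting message can name
    # both parts; old-style lockers are reported verbatim. The sample values
    # below are assumptions for illustration.
    def _locker_message_sketch(self):
        for locker in ('alice-laptop:12345', 'legacy-locker'):
            if ':' in locker:
                host, pid = locker.split(":", 1)
                self.ui.status("held by process %r on host %r\n"
                               % (pid, host))
            else:
                self.ui.status("held by %r\n" % locker)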

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
1322
1316 def lock(self, wait=True):
1323 def lock(self, wait=True):
1317 '''Lock the repository store (.hg/store) and return a weak reference
1324 '''Lock the repository store (.hg/store) and return a weak reference
1318 to the lock. Use this before modifying the store (e.g. committing or
1325 to the lock. Use this before modifying the store (e.g. committing or
1319 stripping). If you are opening a transaction, get a lock as well.)
1326 stripping). If you are opening a transaction, get a lock as well.)
1320
1327
1321 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1328 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1322 'wlock' first to avoid a dead-lock hazard.'''
1329 'wlock' first to avoid a dead-lock hazard.'''
1323 l = self._currentlock(self._lockref)
1330 l = self._currentlock(self._lockref)
1324 if l is not None:
1331 if l is not None:
1325 l.lock()
1332 l.lock()
1326 return l
1333 return l
1327
1334
1328 l = self._lock(self.svfs, "lock", wait, None,
1335 l = self._lock(self.svfs, "lock", wait, None,
1329 self.invalidate, _('repository %s') % self.origroot)
1336 self.invalidate, _('repository %s') % self.origroot)
1330 self._lockref = weakref.ref(l)
1337 self._lockref = weakref.ref(l)
1331 return l
1338 return l
1332
1339
1333 def _wlockchecktransaction(self):
1340 def _wlockchecktransaction(self):
1334 if self.currenttransaction() is not None:
1341 if self.currenttransaction() is not None:
1335 raise error.LockInheritanceContractViolation(
1342 raise error.LockInheritanceContractViolation(
1336 'wlock cannot be inherited in the middle of a transaction')
1343 'wlock cannot be inherited in the middle of a transaction')
1337
1344
1338 def wlock(self, wait=True):
1345 def wlock(self, wait=True):
1339 '''Lock the non-store parts of the repository (everything under
1346 '''Lock the non-store parts of the repository (everything under
1340 .hg except .hg/store) and return a weak reference to the lock.
1347 .hg except .hg/store) and return a weak reference to the lock.
1341
1348
1342 Use this before modifying files in .hg.
1349 Use this before modifying files in .hg.
1343
1350
1344 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1351 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1345 'wlock' first to avoid a dead-lock hazard.'''
1352 'wlock' first to avoid a dead-lock hazard.'''
1346 l = self._wlockref and self._wlockref()
1353 l = self._wlockref and self._wlockref()
1347 if l is not None and l.held:
1354 if l is not None and l.held:
1348 l.lock()
1355 l.lock()
1349 return l
1356 return l
1350
1357
1351 # We do not need to check for non-waiting lock acquisition. Such
1358 # We do not need to check for non-waiting lock acquisition. Such
1352 # acquisition would not cause dead-lock as they would just fail.
1359 # acquisition would not cause dead-lock as they would just fail.
1353 if wait and (self.ui.configbool('devel', 'all-warnings')
1360 if wait and (self.ui.configbool('devel', 'all-warnings')
1354 or self.ui.configbool('devel', 'check-locks')):
1361 or self.ui.configbool('devel', 'check-locks')):
1355 if self._currentlock(self._lockref) is not None:
1362 if self._currentlock(self._lockref) is not None:
1356 self.ui.develwarn('"wlock" acquired after "lock"')
1363 self.ui.develwarn('"wlock" acquired after "lock"')
1357
1364
1358 def unlock():
1365 def unlock():
1359 if self.dirstate.pendingparentchange():
1366 if self.dirstate.pendingparentchange():
1360 self.dirstate.invalidate()
1367 self.dirstate.invalidate()
1361 else:
1368 else:
1362 self.dirstate.write(None)
1369 self.dirstate.write(None)
1363
1370
1364 self._filecache['dirstate'].refresh()
1371 self._filecache['dirstate'].refresh()
1365
1372
1366 l = self._lock(self.vfs, "wlock", wait, unlock,
1373 l = self._lock(self.vfs, "wlock", wait, unlock,
1367 self.invalidatedirstate, _('working directory of %s') %
1374 self.invalidatedirstate, _('working directory of %s') %
1368 self.origroot,
1375 self.origroot,
1369 inheritchecker=self._wlockchecktransaction,
1376 inheritchecker=self._wlockchecktransaction,
1370 parentenvvar='HG_WLOCK_LOCKER')
1377 parentenvvar='HG_WLOCK_LOCKER')
1371 self._wlockref = weakref.ref(l)
1378 self._wlockref = weakref.ref(l)
1372 return l
1379 return l
1373
1380
1374 def _currentlock(self, lockref):
1381 def _currentlock(self, lockref):
1375 """Returns the lock if it's held, or None if it's not."""
1382 """Returns the lock if it's held, or None if it's not."""
1376 if lockref is None:
1383 if lockref is None:
1377 return None
1384 return None
1378 l = lockref()
1385 l = lockref()
1379 if l is None or not l.held:
1386 if l is None or not l.held:
1380 return None
1387 return None
1381 return l
1388 return l
1382
1389
1383 def currentwlock(self):
1390 def currentwlock(self):
1384 """Returns the wlock if it's held, or None if it's not."""
1391 """Returns the wlock if it's held, or None if it's not."""
1385 return self._currentlock(self._wlockref)
1392 return self._currentlock(self._wlockref)
1386
1393
1387 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1394 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1388 """
1395 """
1389 commit an individual file as part of a larger transaction
1396 commit an individual file as part of a larger transaction
1390 """
1397 """
1391
1398
1392 fname = fctx.path()
1399 fname = fctx.path()
1393 fparent1 = manifest1.get(fname, nullid)
1400 fparent1 = manifest1.get(fname, nullid)
1394 fparent2 = manifest2.get(fname, nullid)
1401 fparent2 = manifest2.get(fname, nullid)
1395 if isinstance(fctx, context.filectx):
1402 if isinstance(fctx, context.filectx):
1396 node = fctx.filenode()
1403 node = fctx.filenode()
1397 if node in [fparent1, fparent2]:
1404 if node in [fparent1, fparent2]:
1398 self.ui.debug('reusing %s filelog entry\n' % fname)
1405 self.ui.debug('reusing %s filelog entry\n' % fname)
1399 if manifest1.flags(fname) != fctx.flags():
1406 if manifest1.flags(fname) != fctx.flags():
1400 changelist.append(fname)
1407 changelist.append(fname)
1401 return node
1408 return node
1402
1409
1403 flog = self.file(fname)
1410 flog = self.file(fname)
1404 meta = {}
1411 meta = {}
1405 copy = fctx.renamed()
1412 copy = fctx.renamed()
1406 if copy and copy[0] != fname:
1413 if copy and copy[0] != fname:
1407 # Mark the new revision of this file as a copy of another
1414 # Mark the new revision of this file as a copy of another
1408 # file. This copy data will effectively act as a parent
1415 # file. This copy data will effectively act as a parent
1409 # of this new revision. If this is a merge, the first
1416 # of this new revision. If this is a merge, the first
1410 # parent will be the nullid (meaning "look up the copy data")
1417 # parent will be the nullid (meaning "look up the copy data")
1411 # and the second one will be the other parent. For example:
1418 # and the second one will be the other parent. For example:
1412 #
1419 #
1413 # 0 --- 1 --- 3 rev1 changes file foo
1420 # 0 --- 1 --- 3 rev1 changes file foo
1414 # \ / rev2 renames foo to bar and changes it
1421 # \ / rev2 renames foo to bar and changes it
1415 # \- 2 -/ rev3 should have bar with all changes and
1422 # \- 2 -/ rev3 should have bar with all changes and
1416 # should record that bar descends from
1423 # should record that bar descends from
1417 # bar in rev2 and foo in rev1
1424 # bar in rev2 and foo in rev1
1418 #
1425 #
1419 # this allows this merge to succeed:
1426 # this allows this merge to succeed:
1420 #
1427 #
1421 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1428 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1422 # \ / merging rev3 and rev4 should use bar@rev2
1429 # \ / merging rev3 and rev4 should use bar@rev2
1423 # \- 2 --- 4 as the merge base
1430 # \- 2 --- 4 as the merge base
1424 #
1431 #
1425
1432
1426 cfname = copy[0]
1433 cfname = copy[0]
1427 crev = manifest1.get(cfname)
1434 crev = manifest1.get(cfname)
1428 newfparent = fparent2
1435 newfparent = fparent2
1429
1436
1430 if manifest2: # branch merge
1437 if manifest2: # branch merge
1431 if fparent2 == nullid or crev is None: # copied on remote side
1438 if fparent2 == nullid or crev is None: # copied on remote side
1432 if cfname in manifest2:
1439 if cfname in manifest2:
1433 crev = manifest2[cfname]
1440 crev = manifest2[cfname]
1434 newfparent = fparent1
1441 newfparent = fparent1
1435
1442
1436 # Here, we used to search backwards through history to try to find
1443 # Here, we used to search backwards through history to try to find
1437 # where the file copy came from if the source of a copy was not in
1444 # where the file copy came from if the source of a copy was not in
1438 # the parent directory. However, this doesn't actually make sense to
1445 # the parent directory. However, this doesn't actually make sense to
1439 # do (what does a copy from something not in your working copy even
1446 # do (what does a copy from something not in your working copy even
1440 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1447 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1441 # the user that copy information was dropped, so if they didn't
1448 # the user that copy information was dropped, so if they didn't
1442 # expect this outcome it can be fixed, but this is the correct
1449 # expect this outcome it can be fixed, but this is the correct
1443 # behavior in this circumstance.
1450 # behavior in this circumstance.
1444
1451
1445 if crev:
1452 if crev:
1446 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1453 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1447 meta["copy"] = cfname
1454 meta["copy"] = cfname
1448 meta["copyrev"] = hex(crev)
1455 meta["copyrev"] = hex(crev)
1449 fparent1, fparent2 = nullid, newfparent
1456 fparent1, fparent2 = nullid, newfparent
1450 else:
1457 else:
1451 self.ui.warn(_("warning: can't find ancestor for '%s' "
1458 self.ui.warn(_("warning: can't find ancestor for '%s' "
1452 "copied from '%s'!\n") % (fname, cfname))
1459 "copied from '%s'!\n") % (fname, cfname))
1453
1460
1454 elif fparent1 == nullid:
1461 elif fparent1 == nullid:
1455 fparent1, fparent2 = fparent2, nullid
1462 fparent1, fparent2 = fparent2, nullid
1456 elif fparent2 != nullid:
1463 elif fparent2 != nullid:
1457 # is one parent an ancestor of the other?
1464 # is one parent an ancestor of the other?
1458 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1465 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1459 if fparent1 in fparentancestors:
1466 if fparent1 in fparentancestors:
1460 fparent1, fparent2 = fparent2, nullid
1467 fparent1, fparent2 = fparent2, nullid
1461 elif fparent2 in fparentancestors:
1468 elif fparent2 in fparentancestors:
1462 fparent2 = nullid
1469 fparent2 = nullid
1463
1470
1464 # is the file changed?
1471 # is the file changed?
1465 text = fctx.data()
1472 text = fctx.data()
1466 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1473 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1467 changelist.append(fname)
1474 changelist.append(fname)
1468 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1475 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1469 # are just the flags changed during merge?
1476 # are just the flags changed during merge?
1470 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1477 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1471 changelist.append(fname)
1478 changelist.append(fname)
1472
1479
1473 return fparent1
1480 return fparent1
1474
1481
1475 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1482 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1476 """check for commit arguments that aren't commitable"""
1483 """check for commit arguments that aren't commitable"""
1477 if match.isexact() or match.prefix():
1484 if match.isexact() or match.prefix():
1478 matched = set(status.modified + status.added + status.removed)
1485 matched = set(status.modified + status.added + status.removed)
1479
1486
1480 for f in match.files():
1487 for f in match.files():
1481 f = self.dirstate.normalize(f)
1488 f = self.dirstate.normalize(f)
1482 if f == '.' or f in matched or f in wctx.substate:
1489 if f == '.' or f in matched or f in wctx.substate:
1483 continue
1490 continue
1484 if f in status.deleted:
1491 if f in status.deleted:
1485 fail(f, _('file not found!'))
1492 fail(f, _('file not found!'))
1486 if f in vdirs: # visited directory
1493 if f in vdirs: # visited directory
1487 d = f + '/'
1494 d = f + '/'
1488 for mf in matched:
1495 for mf in matched:
1489 if mf.startswith(d):
1496 if mf.startswith(d):
1490 break
1497 break
1491 else:
1498 else:
1492 fail(f, _("no match under directory!"))
1499 fail(f, _("no match under directory!"))
1493 elif f not in self.dirstate:
1500 elif f not in self.dirstate:
1494 fail(f, _("file not tracked!"))
1501 fail(f, _("file not tracked!"))
1495
1502
1496 @unfilteredmethod
1503 @unfilteredmethod
1497 def commit(self, text="", user=None, date=None, match=None, force=False,
1504 def commit(self, text="", user=None, date=None, match=None, force=False,
1498 editor=False, extra=None):
1505 editor=False, extra=None):
1499 """Add a new revision to current repository.
1506 """Add a new revision to current repository.
1500
1507
1501 Revision information is gathered from the working directory,
1508 Revision information is gathered from the working directory,
1502 match can be used to filter the committed files. If editor is
1509 match can be used to filter the committed files. If editor is
1503 supplied, it is called to get a commit message.
1510 supplied, it is called to get a commit message.
1504 """
1511 """
1505 if extra is None:
1512 if extra is None:
1506 extra = {}
1513 extra = {}
1507
1514
1508 def fail(f, msg):
1515 def fail(f, msg):
1509 raise error.Abort('%s: %s' % (f, msg))
1516 raise error.Abort('%s: %s' % (f, msg))
1510
1517
1511 if not match:
1518 if not match:
1512 match = matchmod.always(self.root, '')
1519 match = matchmod.always(self.root, '')
1513
1520
1514 if not force:
1521 if not force:
1515 vdirs = []
1522 vdirs = []
1516 match.explicitdir = vdirs.append
1523 match.explicitdir = vdirs.append
1517 match.bad = fail
1524 match.bad = fail
1518
1525
1519 wlock = lock = tr = None
1526 wlock = lock = tr = None
1520 try:
1527 try:
1521 wlock = self.wlock()
1528 wlock = self.wlock()
1522 lock = self.lock() # for recent changelog (see issue4368)
1529 lock = self.lock() # for recent changelog (see issue4368)
1523
1530
1524 wctx = self[None]
1531 wctx = self[None]
1525 merge = len(wctx.parents()) > 1
1532 merge = len(wctx.parents()) > 1
1526
1533
1527 if not force and merge and match.ispartial():
1534 if not force and merge and match.ispartial():
1528 raise error.Abort(_('cannot partially commit a merge '
1535 raise error.Abort(_('cannot partially commit a merge '
1529 '(do not specify files or patterns)'))
1536 '(do not specify files or patterns)'))
1530
1537
1531 status = self.status(match=match, clean=force)
1538 status = self.status(match=match, clean=force)
1532 if force:
1539 if force:
1533 status.modified.extend(status.clean) # mq may commit clean files
1540 status.modified.extend(status.clean) # mq may commit clean files
1534
1541
1535 # check subrepos
1542 # check subrepos
1536 subs = []
1543 subs = []
1537 commitsubs = set()
1544 commitsubs = set()
1538 newstate = wctx.substate.copy()
1545 newstate = wctx.substate.copy()
1539 # only manage subrepos and .hgsubstate if .hgsub is present
1546 # only manage subrepos and .hgsubstate if .hgsub is present
1540 if '.hgsub' in wctx:
1547 if '.hgsub' in wctx:
1541 # we'll decide whether to track this ourselves, thanks
1548 # we'll decide whether to track this ourselves, thanks
1542 for c in status.modified, status.added, status.removed:
1549 for c in status.modified, status.added, status.removed:
1543 if '.hgsubstate' in c:
1550 if '.hgsubstate' in c:
1544 c.remove('.hgsubstate')
1551 c.remove('.hgsubstate')
1545
1552
1546 # compare current state to last committed state
1553 # compare current state to last committed state
1547 # build new substate based on last committed state
1554 # build new substate based on last committed state
1548 oldstate = wctx.p1().substate
1555 oldstate = wctx.p1().substate
1549 for s in sorted(newstate.keys()):
1556 for s in sorted(newstate.keys()):
1550 if not match(s):
1557 if not match(s):
1551 # ignore working copy, use old state if present
1558 # ignore working copy, use old state if present
1552 if s in oldstate:
1559 if s in oldstate:
1553 newstate[s] = oldstate[s]
1560 newstate[s] = oldstate[s]
1554 continue
1561 continue
1555 if not force:
1562 if not force:
1556 raise error.Abort(
1563 raise error.Abort(
1557 _("commit with new subrepo %s excluded") % s)
1564 _("commit with new subrepo %s excluded") % s)
1558 dirtyreason = wctx.sub(s).dirtyreason(True)
1565 dirtyreason = wctx.sub(s).dirtyreason(True)
1559 if dirtyreason:
1566 if dirtyreason:
1560 if not self.ui.configbool('ui', 'commitsubrepos'):
1567 if not self.ui.configbool('ui', 'commitsubrepos'):
1561 raise error.Abort(dirtyreason,
1568 raise error.Abort(dirtyreason,
1562 hint=_("use --subrepos for recursive commit"))
1569 hint=_("use --subrepos for recursive commit"))
1563 subs.append(s)
1570 subs.append(s)
1564 commitsubs.add(s)
1571 commitsubs.add(s)
1565 else:
1572 else:
1566 bs = wctx.sub(s).basestate()
1573 bs = wctx.sub(s).basestate()
1567 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1574 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1568 if oldstate.get(s, (None, None, None))[1] != bs:
1575 if oldstate.get(s, (None, None, None))[1] != bs:
1569 subs.append(s)
1576 subs.append(s)
1570
1577
1571 # check for removed subrepos
1578 # check for removed subrepos
1572 for p in wctx.parents():
1579 for p in wctx.parents():
1573 r = [s for s in p.substate if s not in newstate]
1580 r = [s for s in p.substate if s not in newstate]
1574 subs += [s for s in r if match(s)]
1581 subs += [s for s in r if match(s)]
1575 if subs:
1582 if subs:
1576 if (not match('.hgsub') and
1583 if (not match('.hgsub') and
1577 '.hgsub' in (wctx.modified() + wctx.added())):
1584 '.hgsub' in (wctx.modified() + wctx.added())):
1578 raise error.Abort(
1585 raise error.Abort(
1579 _("can't commit subrepos without .hgsub"))
1586 _("can't commit subrepos without .hgsub"))
1580 status.modified.insert(0, '.hgsubstate')
1587 status.modified.insert(0, '.hgsubstate')
1581
1588
1582 elif '.hgsub' in status.removed:
1589 elif '.hgsub' in status.removed:
1583 # clean up .hgsubstate when .hgsub is removed
1590 # clean up .hgsubstate when .hgsub is removed
1584 if ('.hgsubstate' in wctx and
1591 if ('.hgsubstate' in wctx and
1585 '.hgsubstate' not in (status.modified + status.added +
1592 '.hgsubstate' not in (status.modified + status.added +
1586 status.removed)):
1593 status.removed)):
1587 status.removed.insert(0, '.hgsubstate')
1594 status.removed.insert(0, '.hgsubstate')
1588
1595
1589 # make sure all explicit patterns are matched
1596 # make sure all explicit patterns are matched
1590 if not force:
1597 if not force:
1591 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1598 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1592
1599
1593 cctx = context.workingcommitctx(self, status,
1600 cctx = context.workingcommitctx(self, status,
1594 text, user, date, extra)
1601 text, user, date, extra)
1595
1602
1596 # internal config: ui.allowemptycommit
1603 # internal config: ui.allowemptycommit
1597 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1604 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1598 or extra.get('close') or merge or cctx.files()
1605 or extra.get('close') or merge or cctx.files()
1599 or self.ui.configbool('ui', 'allowemptycommit'))
1606 or self.ui.configbool('ui', 'allowemptycommit'))
1600 if not allowemptycommit:
1607 if not allowemptycommit:
1601 return None
1608 return None
1602
1609
1603 if merge and cctx.deleted():
1610 if merge and cctx.deleted():
1604 raise error.Abort(_("cannot commit merge with missing files"))
1611 raise error.Abort(_("cannot commit merge with missing files"))
1605
1612
1606 ms = mergemod.mergestate.read(self)
1613 ms = mergemod.mergestate.read(self)
1607
1614
1608 if list(ms.unresolved()):
1615 if list(ms.unresolved()):
1609 raise error.Abort(_('unresolved merge conflicts '
1616 raise error.Abort(_('unresolved merge conflicts '
1610 '(see "hg help resolve")'))
1617 '(see "hg help resolve")'))
1611 if ms.mdstate() != 's' or list(ms.driverresolved()):
1618 if ms.mdstate() != 's' or list(ms.driverresolved()):
1612 raise error.Abort(_('driver-resolved merge conflicts'),
1619 raise error.Abort(_('driver-resolved merge conflicts'),
1613 hint=_('run "hg resolve --all" to resolve'))
1620 hint=_('run "hg resolve --all" to resolve'))
1614
1621
1615 if editor:
1622 if editor:
1616 cctx._text = editor(self, cctx, subs)
1623 cctx._text = editor(self, cctx, subs)
1617 edited = (text != cctx._text)
1624 edited = (text != cctx._text)
1618
1625
1619 # Save commit message in case this transaction gets rolled back
1626 # Save commit message in case this transaction gets rolled back
1620 # (e.g. by a pretxncommit hook). Leave the content alone on
1627 # (e.g. by a pretxncommit hook). Leave the content alone on
1621 # the assumption that the user will use the same editor again.
1628 # the assumption that the user will use the same editor again.
1622 msgfn = self.savecommitmessage(cctx._text)
1629 msgfn = self.savecommitmessage(cctx._text)
1623
1630
1624 # commit subs and write new state
1631 # commit subs and write new state
1625 if subs:
1632 if subs:
1626 for s in sorted(commitsubs):
1633 for s in sorted(commitsubs):
1627 sub = wctx.sub(s)
1634 sub = wctx.sub(s)
1628 self.ui.status(_('committing subrepository %s\n') %
1635 self.ui.status(_('committing subrepository %s\n') %
1629 subrepo.subrelpath(sub))
1636 subrepo.subrelpath(sub))
1630 sr = sub.commit(cctx._text, user, date)
1637 sr = sub.commit(cctx._text, user, date)
1631 newstate[s] = (newstate[s][0], sr)
1638 newstate[s] = (newstate[s][0], sr)
1632 subrepo.writestate(self, newstate)
1639 subrepo.writestate(self, newstate)
1633
1640
1634 p1, p2 = self.dirstate.parents()
1641 p1, p2 = self.dirstate.parents()
1635 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1642 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1636 try:
1643 try:
1637 self.hook("precommit", throw=True, parent1=hookp1,
1644 self.hook("precommit", throw=True, parent1=hookp1,
1638 parent2=hookp2)
1645 parent2=hookp2)
1639 tr = self.transaction('commit')
1646 tr = self.transaction('commit')
1640 ret = self.commitctx(cctx, True)
1647 ret = self.commitctx(cctx, True)
1641 except: # re-raises
1648 except: # re-raises
1642 if edited:
1649 if edited:
1643 self.ui.write(
1650 self.ui.write(
1644 _('note: commit message saved in %s\n') % msgfn)
1651 _('note: commit message saved in %s\n') % msgfn)
1645 raise
1652 raise
1646 # update bookmarks, dirstate and mergestate
1653 # update bookmarks, dirstate and mergestate
1647 bookmarks.update(self, [p1, p2], ret)
1654 bookmarks.update(self, [p1, p2], ret)
1648 cctx.markcommitted(ret)
1655 cctx.markcommitted(ret)
1649 ms.reset()
1656 ms.reset()
1650 tr.close()
1657 tr.close()
1651
1658
1652 finally:
1659 finally:
1653 lockmod.release(tr, lock, wlock)
1660 lockmod.release(tr, lock, wlock)
1654
1661
1655 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1662 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1656 # hack for command that use a temporary commit (eg: histedit)
1663 # hack for command that use a temporary commit (eg: histedit)
1657 # temporary commit got stripped before hook release
1664 # temporary commit got stripped before hook release
1658 if self.changelog.hasnode(ret):
1665 if self.changelog.hasnode(ret):
1659 self.hook("commit", node=node, parent1=parent1,
1666 self.hook("commit", node=node, parent1=parent1,
1660 parent2=parent2)
1667 parent2=parent2)
1661 self._afterlock(commithook)
1668 self._afterlock(commithook)
1662 return ret
1669 return ret
1663
1670
1664 @unfilteredmethod
1671 @unfilteredmethod
1665 def commitctx(self, ctx, error=False):
1672 def commitctx(self, ctx, error=False):
1666 """Add a new revision to current repository.
1673 """Add a new revision to current repository.
1667 Revision information is passed via the context argument.
1674 Revision information is passed via the context argument.
1668 """
1675 """
1669
1676
1670 tr = None
1677 tr = None
1671 p1, p2 = ctx.p1(), ctx.p2()
1678 p1, p2 = ctx.p1(), ctx.p2()
1672 user = ctx.user()
1679 user = ctx.user()
1673
1680
1674 lock = self.lock()
1681 lock = self.lock()
1675 try:
1682 try:
1676 tr = self.transaction("commit")
1683 tr = self.transaction("commit")
1677 trp = weakref.proxy(tr)
1684 trp = weakref.proxy(tr)
1678
1685
1679 if ctx.files():
1686 if ctx.files():
1680 m1 = p1.manifest()
1687 m1 = p1.manifest()
1681 m2 = p2.manifest()
1688 m2 = p2.manifest()
1682 m = m1.copy()
1689 m = m1.copy()
1683
1690
1684 # check in files
1691 # check in files
1685 added = []
1692 added = []
1686 changed = []
1693 changed = []
1687 removed = list(ctx.removed())
1694 removed = list(ctx.removed())
1688 linkrev = len(self)
1695 linkrev = len(self)
1689 self.ui.note(_("committing files:\n"))
1696 self.ui.note(_("committing files:\n"))
1690 for f in sorted(ctx.modified() + ctx.added()):
1697 for f in sorted(ctx.modified() + ctx.added()):
1691 self.ui.note(f + "\n")
1698 self.ui.note(f + "\n")
1692 try:
1699 try:
1693 fctx = ctx[f]
1700 fctx = ctx[f]
1694 if fctx is None:
1701 if fctx is None:
1695 removed.append(f)
1702 removed.append(f)
1696 else:
1703 else:
1697 added.append(f)
1704 added.append(f)
1698 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1705 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1699 trp, changed)
1706 trp, changed)
1700 m.setflag(f, fctx.flags())
1707 m.setflag(f, fctx.flags())
1701 except OSError as inst:
1708 except OSError as inst:
1702 self.ui.warn(_("trouble committing %s!\n") % f)
1709 self.ui.warn(_("trouble committing %s!\n") % f)
1703 raise
1710 raise
1704 except IOError as inst:
1711 except IOError as inst:
1705 errcode = getattr(inst, 'errno', errno.ENOENT)
1712 errcode = getattr(inst, 'errno', errno.ENOENT)
1706 if error or errcode and errcode != errno.ENOENT:
1713 if error or errcode and errcode != errno.ENOENT:
1707 self.ui.warn(_("trouble committing %s!\n") % f)
1714 self.ui.warn(_("trouble committing %s!\n") % f)
1708 raise
1715 raise
1709
1716
1710 # update manifest
1717 # update manifest
1711 self.ui.note(_("committing manifest\n"))
1718 self.ui.note(_("committing manifest\n"))
1712 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1719 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1713 drop = [f for f in removed if f in m]
1720 drop = [f for f in removed if f in m]
1714 for f in drop:
1721 for f in drop:
1715 del m[f]
1722 del m[f]
1716 mn = self.manifest.add(m, trp, linkrev,
1723 mn = self.manifest.add(m, trp, linkrev,
1717 p1.manifestnode(), p2.manifestnode(),
1724 p1.manifestnode(), p2.manifestnode(),
1718 added, drop)
1725 added, drop)
1719 files = changed + removed
1726 files = changed + removed
1720 else:
1727 else:
1721 mn = p1.manifestnode()
1728 mn = p1.manifestnode()
1722 files = []
1729 files = []
1723
1730
1724 # update changelog
1731 # update changelog
1725 self.ui.note(_("committing changelog\n"))
1732 self.ui.note(_("committing changelog\n"))
1726 self.changelog.delayupdate(tr)
1733 self.changelog.delayupdate(tr)
1727 n = self.changelog.add(mn, files, ctx.description(),
1734 n = self.changelog.add(mn, files, ctx.description(),
1728 trp, p1.node(), p2.node(),
1735 trp, p1.node(), p2.node(),
1729 user, ctx.date(), ctx.extra().copy())
1736 user, ctx.date(), ctx.extra().copy())
1730 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1737 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1731 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1738 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1732 parent2=xp2)
1739 parent2=xp2)
1733 # set the new commit is proper phase
1740 # set the new commit is proper phase
1734 targetphase = subrepo.newcommitphase(self.ui, ctx)
1741 targetphase = subrepo.newcommitphase(self.ui, ctx)
1735 if targetphase:
1742 if targetphase:
1736 # retract boundary do not alter parent changeset.
1743 # retract boundary do not alter parent changeset.
1737 # if a parent have higher the resulting phase will
1744 # if a parent have higher the resulting phase will
1738 # be compliant anyway
1745 # be compliant anyway
1739 #
1746 #
1740 # if minimal phase was 0 we don't need to retract anything
1747 # if minimal phase was 0 we don't need to retract anything
1741 phases.retractboundary(self, tr, targetphase, [n])
1748 phases.retractboundary(self, tr, targetphase, [n])
1742 tr.close()
1749 tr.close()
1743 branchmap.updatecache(self.filtered('served'))
1750 branchmap.updatecache(self.filtered('served'))
1744 return n
1751 return n
1745 finally:
1752 finally:
1746 if tr:
1753 if tr:
1747 tr.release()
1754 tr.release()
1748 lock.release()
1755 lock.release()
1749
1756
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

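# Illustrative sketch (editor's addition, not from this module): the expected
# bracketing of the two methods above around a destructive operation such as
# strip. `dostrip` is a hypothetical callable performing the actual revlog
# truncation.
def _destroywrapper(repo, dostrip):
    repo.destroying()   # flush pending in-memory state (e.g. the phasecache)
    dostrip()           # destructive step supplied by the caller
    repo.destroyed()    # then repair phase/branch/tag caches
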
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

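# Usage sketch (editor's addition, assuming an existing repo object): collect
# the Python files in the working directory. node=None selects the working
# directory context, exactly as the default above.
from mercurial import match as _matchmod

def _listpyfiles(repo):
    m = _matchmod.match(repo.root, '', ['glob:**.py'])
    return list(repo.walk(m))
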
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

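# Usage sketch (editor's addition): the returned value is a status tuple with
# modified/added/removed/etc. attributes, so summarizing the dirty files in
# the working directory is a one-liner per category.
def _dirtyfiles(repo):
    st = repo.status()   # defaults compare '.' against the working directory
    return st.modified + st.added + st.removed
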
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

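# Editor's note: the loop above samples the first-parent chain from `top`
# back toward `bottom` at exponentially growing distances (steps 1, 2, 4,
# 8, ...), which is what kept the legacy discovery protocol's replies
# logarithmic in the length of history. The same sampling over a plain
# sequence, as a self-contained sketch:
def _samplelog(chain):
    out, f = [], 1
    for i, item in enumerate(chain):
        if i == f:
            out.append(item)
            f *= 2
    return out

# _samplelog(range(20)) == [1, 2, 4, 8, 16]
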
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called before
        pushing changesets; each hook is passed the pushop, which carries
        the repo, remote, and outgoing information.
        """
        return util.hooks()

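# Illustrative sketch (editor's addition, not from this module): an extension
# vetoing large pushes through the hook point above. The 'mylimit' name and
# the 100-changeset threshold are invented for the example; the hook receives
# the pushop, as the docstring describes.
from mercurial import error as _error

def reposetup(ui, repo):
    if not repo.local():
        return
    def _checkoutgoing(pushop):
        if len(pushop.outgoing.missing) > 100:
            raise _error.Abort('refusing to push more than 100 changesets')
    repo.prepushoutgoinghooks.add('mylimit', _checkoutgoing)
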
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

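# Usage sketch (editor's addition): pushkey is the generic key/value channel
# behind bookmark and phase exchange. Moving a bookmark through it looks like
# this; old and new values are hex node ids.
from mercurial.node import hex as _hex

def _movebookmark(repo, name, oldnode, newnode):
    # returns False (after printing pushkey-abort) if a prepushkey hook vetoes
    return repo.pushkey('bookmarks', name, _hex(oldnode), _hex(newnode))
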
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

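# Editor's note: aftertrans returns a plain closure, not a bound method, so
# the transaction holds no reference back to a repo object; that keeps the
# reference graph acyclic and lets destructors run, per the comment above.
# Hedged sketch of the wiring, mirroring localrepository.transaction():
#
#     tr = transaction.transaction(report, vfs, vfsmap, "journal", "undo",
#                                  aftertrans(renamefiles), ...)
#
# When the transaction completes, the closure renames each journal file to
# its undo counterpart so the operation can later be rolled back.
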
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
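
# Illustrative sketch (editor's addition, written as a separate extension
# module): injecting a custom requirement into new repositories by wrapping
# the function above. The 'exp-myformat' requirement name and config knob are
# invented for the example.
from mercurial import extensions as _extensions
from mercurial import localrepo as _localrepo

def _newreporequirements(orig, repo):
    reqs = orig(repo)
    if repo.ui.configbool('format', 'usemyformat', False):
        reqs.add('exp-myformat')
    return reqs

def uisetup(ui):
    _extensions.wrapfunction(_localrepo, 'newreporequirements',
                             _newreporequirements)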
@@ -1,78 +1,78 @@
#require unix-permissions no-root no-windows

Prepare

  $ hg init a
  $ echo a > a/a
  $ hg -R a ci -A -m a
  adding a

  $ hg clone a b
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

Test that raising an exception in the release function doesn't cause the lock to choke

  $ cat > testlock.py << EOF
  > from mercurial import cmdutil, error
  >
  > cmdtable = {}
  > command = cmdutil.command(cmdtable)
  >
  > def acquiretestlock(repo, releaseexc):
  >     def unlock():
  >         if releaseexc:
  >             raise error.Abort('expected release exception')
  >     l = repo._lock(repo.vfs, 'testlock', False, unlock, None, 'test lock')
  >     return l
  >
  > @command('testlockexc')
  > def testlockexc(ui, repo):
  >     testlock = acquiretestlock(repo, True)
  >     try:
  >         testlock.release()
  >     finally:
  >         try:
  >             testlock = acquiretestlock(repo, False)
  >         except error.LockHeld:
  >             raise error.Abort('lockfile on disk even after releasing!')
  >         testlock.release()
  > EOF
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > testlock=$TESTTMP/testlock.py
  > EOF

  $ hg -R b testlockexc
  abort: expected release exception
  [255]

One process waiting for another

  $ cat > hooks.py << EOF
  > import time
  > def sleepone(**x): time.sleep(1)
  > def sleephalf(**x): time.sleep(0.5)
  > EOF
  $ echo b > b/b
  $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
  $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
  > > preup 2>&1
  $ wait
  $ cat preup
-  waiting for lock on working directory of b held by '*:*' (glob)
+  waiting for lock on working directory of b held by process '*' on host '*' (glob)
  got lock after * seconds (glob)
  $ cat stdout
  adding b

Pushing to a local read-only repo that can't be locked

  $ chmod 100 a/.hg/store

  $ hg -R b push a
  pushing to a
  searching for changes
  abort: could not lock repository a: Permission denied
  [255]

  $ chmod 700 a/.hg/store
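
The changed output line above is the point of this changeset: a lock records
which host and process hold it, and for new-style locks the waiting message
now says "process '<pid>' on host '<host>'" instead of echoing the raw locker
token. A hedged sketch of that formatting (the 'host:pid' layout is the
traditional lock-file content; the helper name is invented here):

    def describelocker(locker):
        if ':' not in locker:
            return "'%s'" % locker
        host, pid = locker.split(':', 1)
        return "process '%s' on host '%s'" % (pid, host)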