wlock: only issue devel warning when actually acquiring the lock...
Pierre-Yves David
r24744:bedefc61 default
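
The commit message refers to localrepository.wlock(): the 'devel' lock-order warning should only be emitted when the working-directory lock is actually being acquired, not when an already-held reference is handed back. As a rough illustrative sketch of that idea (not the patch itself, which lies beyond the portion of the file shown below), reusing the devel/check-locks pattern visible in transaction() further down:

    def wlock(self, wait=True):
        # Handing back an existing, still-held wlock is not an acquisition,
        # so it should not trigger the devel warning.
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l
        # Only warn when we are really about to acquire the wlock.
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            if self._lockref and self._lockref():
                self.ui.write_err('"wlock" acquired after "lock"\n')
        # ... actual acquisition follows ...
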
@@ -1,1926 +1,1927 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
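# --- illustrative usage note (editor's sketch, not part of localrepo.py) ---
# The helpers above are applied to localrepository itself; for example,
# later in this file the bookmark store is declared as
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# so the cached value lives on repo.unfiltered() and is invalidated when
# .hg/bookmarks changes, while @unfilteredmethod simply redirects a call
# to the unfiltered repository before running the wrapped method.
# ---------------------------------------------------------------------------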
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 cg = exchange.readbundle(self.ui, cg, None)
128 cg = exchange.readbundle(self.ui, cg, None)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 if util.safehasattr(ret, 'getchunks'):
130 if util.safehasattr(ret, 'getchunks'):
131 # This is a bundle20 object, turn it into an unbundler.
131 # This is a bundle20 object, turn it into an unbundler.
132 # This little dance should be dropped eventually when the API
132 # This little dance should be dropped eventually when the API
133 # is finally improved.
133 # is finally improved.
134 stream = util.chunkbuffer(ret.getchunks())
134 stream = util.chunkbuffer(ret.getchunks())
135 ret = bundle2.getunbundler(self.ui, stream)
135 ret = bundle2.getunbundler(self.ui, stream)
136 return ret
136 return ret
137 except error.PushRaced, exc:
137 except error.PushRaced, exc:
138 raise error.ResponseError(_('push failed:'), str(exc))
138 raise error.ResponseError(_('push failed:'), str(exc))
139
139
140 def lock(self):
140 def lock(self):
141 return self._repo.lock()
141 return self._repo.lock()
142
142
143 def addchangegroup(self, cg, source, url):
143 def addchangegroup(self, cg, source, url):
144 return changegroup.addchangegroup(self._repo, cg, source, url)
144 return changegroup.addchangegroup(self._repo, cg, source, url)
145
145
146 def pushkey(self, namespace, key, old, new):
146 def pushkey(self, namespace, key, old, new):
147 return self._repo.pushkey(namespace, key, old, new)
147 return self._repo.pushkey(namespace, key, old, new)
148
148
149 def listkeys(self, namespace):
149 def listkeys(self, namespace):
150 return self._repo.listkeys(namespace)
150 return self._repo.listkeys(namespace)
151
151
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 '''used to test argument passing over the wire'''
153 '''used to test argument passing over the wire'''
154 return "%s %s %s %s %s" % (one, two, three, four, five)
154 return "%s %s %s %s %s" % (one, two, three, four, five)
155
155
156 class locallegacypeer(localpeer):
156 class locallegacypeer(localpeer):
157 '''peer extension which implements legacy methods too; used for tests with
157 '''peer extension which implements legacy methods too; used for tests with
158 restricted capabilities'''
158 restricted capabilities'''
159
159
160 def __init__(self, repo):
160 def __init__(self, repo):
161 localpeer.__init__(self, repo, caps=legacycaps)
161 localpeer.__init__(self, repo, caps=legacycaps)
162
162
163 def branches(self, nodes):
163 def branches(self, nodes):
164 return self._repo.branches(nodes)
164 return self._repo.branches(nodes)
165
165
166 def between(self, pairs):
166 def between(self, pairs):
167 return self._repo.between(pairs)
167 return self._repo.between(pairs)
168
168
169 def changegroup(self, basenodes, source):
169 def changegroup(self, basenodes, source):
170 return changegroup.changegroup(self._repo, basenodes, source)
170 return changegroup.changegroup(self._repo, basenodes, source)
171
171
172 def changegroupsubset(self, bases, heads, source):
172 def changegroupsubset(self, bases, heads, source):
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174
174
175 class localrepository(object):
175 class localrepository(object):
176
176
177 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
177 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 'dotencode'))
179 'dotencode'))
180 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
180 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
181 requirements = ['revlogv1']
181 requirements = ['revlogv1']
182 filtername = None
182 filtername = None
183
183
184 # a list of (ui, featureset) functions.
184 # a list of (ui, featureset) functions.
185 # only functions defined in module of enabled extensions are invoked
185 # only functions defined in module of enabled extensions are invoked
186 featuresetupfuncs = set()
186 featuresetupfuncs = set()
187
187
188 def _baserequirements(self, create):
188 def _baserequirements(self, create):
189 return self.requirements[:]
189 return self.requirements[:]
190
190
191 def __init__(self, baseui, path=None, create=False):
191 def __init__(self, baseui, path=None, create=False):
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wopener = self.wvfs
193 self.wopener = self.wvfs
194 self.root = self.wvfs.base
194 self.root = self.wvfs.base
195 self.path = self.wvfs.join(".hg")
195 self.path = self.wvfs.join(".hg")
196 self.origroot = path
196 self.origroot = path
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.vfs = scmutil.vfs(self.path)
198 self.vfs = scmutil.vfs(self.path)
199 self.opener = self.vfs
199 self.opener = self.vfs
200 self.baseui = baseui
200 self.baseui = baseui
201 self.ui = baseui.copy()
201 self.ui = baseui.copy()
202 self.ui.copy = baseui.copy # prevent copying repo configuration
202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 # A list of callbacks to shape the phase if no data were found.
203 # A list of callbacks to shape the phase if no data were found.
204 # Callbacks are in the form: func(repo, roots) --> processed root.
204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 # This list is to be filled by extensions during repo setup
205 # This list is to be filled by extensions during repo setup
206 self._phasedefaults = []
206 self._phasedefaults = []
207 try:
207 try:
208 self.ui.readconfig(self.join("hgrc"), self.root)
208 self.ui.readconfig(self.join("hgrc"), self.root)
209 extensions.loadall(self.ui)
209 extensions.loadall(self.ui)
210 except IOError:
210 except IOError:
211 pass
211 pass
212
212
213 if self.featuresetupfuncs:
213 if self.featuresetupfuncs:
214 self.supported = set(self._basesupported) # use private copy
214 self.supported = set(self._basesupported) # use private copy
215 extmods = set(m.__name__ for n, m
215 extmods = set(m.__name__ for n, m
216 in extensions.extensions(self.ui))
216 in extensions.extensions(self.ui))
217 for setupfunc in self.featuresetupfuncs:
217 for setupfunc in self.featuresetupfuncs:
218 if setupfunc.__module__ in extmods:
218 if setupfunc.__module__ in extmods:
219 setupfunc(self.ui, self.supported)
219 setupfunc(self.ui, self.supported)
220 else:
220 else:
221 self.supported = self._basesupported
221 self.supported = self._basesupported
222
222
223 if not self.vfs.isdir():
223 if not self.vfs.isdir():
224 if create:
224 if create:
225 if not self.wvfs.exists():
225 if not self.wvfs.exists():
226 self.wvfs.makedirs()
226 self.wvfs.makedirs()
227 self.vfs.makedir(notindexed=True)
227 self.vfs.makedir(notindexed=True)
228 requirements = self._baserequirements(create)
228 requirements = self._baserequirements(create)
229 if self.ui.configbool('format', 'usestore', True):
229 if self.ui.configbool('format', 'usestore', True):
230 self.vfs.mkdir("store")
230 self.vfs.mkdir("store")
231 requirements.append("store")
231 requirements.append("store")
232 if self.ui.configbool('format', 'usefncache', True):
232 if self.ui.configbool('format', 'usefncache', True):
233 requirements.append("fncache")
233 requirements.append("fncache")
234 if self.ui.configbool('format', 'dotencode', True):
234 if self.ui.configbool('format', 'dotencode', True):
235 requirements.append('dotencode')
235 requirements.append('dotencode')
236 # create an invalid changelog
236 # create an invalid changelog
237 self.vfs.append(
237 self.vfs.append(
238 "00changelog.i",
238 "00changelog.i",
239 '\0\0\0\2' # represents revlogv2
239 '\0\0\0\2' # represents revlogv2
240 ' dummy changelog to prevent using the old repo layout'
240 ' dummy changelog to prevent using the old repo layout'
241 )
241 )
242 if self.ui.configbool('format', 'generaldelta', False):
242 if self.ui.configbool('format', 'generaldelta', False):
243 requirements.append("generaldelta")
243 requirements.append("generaldelta")
244 if self.ui.configbool('experimental', 'manifestv2', False):
244 if self.ui.configbool('experimental', 'manifestv2', False):
245 requirements.append("manifestv2")
245 requirements.append("manifestv2")
246 requirements = set(requirements)
246 requirements = set(requirements)
247 else:
247 else:
248 raise error.RepoError(_("repository %s not found") % path)
248 raise error.RepoError(_("repository %s not found") % path)
249 elif create:
249 elif create:
250 raise error.RepoError(_("repository %s already exists") % path)
250 raise error.RepoError(_("repository %s already exists") % path)
251 else:
251 else:
252 try:
252 try:
253 requirements = scmutil.readrequires(self.vfs, self.supported)
253 requirements = scmutil.readrequires(self.vfs, self.supported)
254 except IOError, inst:
254 except IOError, inst:
255 if inst.errno != errno.ENOENT:
255 if inst.errno != errno.ENOENT:
256 raise
256 raise
257 requirements = set()
257 requirements = set()
258
258
259 self.sharedpath = self.path
259 self.sharedpath = self.path
260 try:
260 try:
261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
262 realpath=True)
262 realpath=True)
263 s = vfs.base
263 s = vfs.base
264 if not vfs.exists():
264 if not vfs.exists():
265 raise error.RepoError(
265 raise error.RepoError(
266 _('.hg/sharedpath points to nonexistent directory %s') % s)
266 _('.hg/sharedpath points to nonexistent directory %s') % s)
267 self.sharedpath = s
267 self.sharedpath = s
268 except IOError, inst:
268 except IOError, inst:
269 if inst.errno != errno.ENOENT:
269 if inst.errno != errno.ENOENT:
270 raise
270 raise
271
271
272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
273 self.spath = self.store.path
273 self.spath = self.store.path
274 self.svfs = self.store.vfs
274 self.svfs = self.store.vfs
275 self.sopener = self.svfs
275 self.sopener = self.svfs
276 self.sjoin = self.store.join
276 self.sjoin = self.store.join
277 self.vfs.createmode = self.store.createmode
277 self.vfs.createmode = self.store.createmode
278 self._applyrequirements(requirements)
278 self._applyrequirements(requirements)
279 if create:
279 if create:
280 self._writerequirements()
280 self._writerequirements()
281
281
282
282
283 self._branchcaches = {}
283 self._branchcaches = {}
284 self._revbranchcache = None
284 self._revbranchcache = None
285 self.filterpats = {}
285 self.filterpats = {}
286 self._datafilters = {}
286 self._datafilters = {}
287 self._transref = self._lockref = self._wlockref = None
287 self._transref = self._lockref = self._wlockref = None
288
288
289 # A cache for various files under .hg/ that tracks file changes,
289 # A cache for various files under .hg/ that tracks file changes,
290 # (used by the filecache decorator)
290 # (used by the filecache decorator)
291 #
291 #
292 # Maps a property name to its util.filecacheentry
292 # Maps a property name to its util.filecacheentry
293 self._filecache = {}
293 self._filecache = {}
294
294
295 # holds sets of revisions to be filtered
295 # holds sets of revisions to be filtered
296 # should be cleared when something might have changed the filter value:
296 # should be cleared when something might have changed the filter value:
297 # - new changesets,
297 # - new changesets,
298 # - phase change,
298 # - phase change,
299 # - new obsolescence marker,
299 # - new obsolescence marker,
300 # - working directory parent change,
300 # - working directory parent change,
301 # - bookmark changes
301 # - bookmark changes
302 self.filteredrevcache = {}
302 self.filteredrevcache = {}
303
303
304 # generic mapping between names and nodes
304 # generic mapping between names and nodes
305 self.names = namespaces.namespaces()
305 self.names = namespaces.namespaces()
306
306
307 def close(self):
307 def close(self):
308 self._writecaches()
308 self._writecaches()
309
309
310 def _writecaches(self):
310 def _writecaches(self):
311 if self._revbranchcache:
311 if self._revbranchcache:
312 self._revbranchcache.write()
312 self._revbranchcache.write()
313
313
314 def _restrictcapabilities(self, caps):
314 def _restrictcapabilities(self, caps):
315 if self.ui.configbool('experimental', 'bundle2-advertise', True):
315 if self.ui.configbool('experimental', 'bundle2-advertise', True):
316 caps = set(caps)
316 caps = set(caps)
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
318 caps.add('bundle2=' + urllib.quote(capsblob))
318 caps.add('bundle2=' + urllib.quote(capsblob))
319 return caps
319 return caps
320
320
321 def _applyrequirements(self, requirements):
321 def _applyrequirements(self, requirements):
322 self.requirements = requirements
322 self.requirements = requirements
323 self.svfs.options = dict((r, 1) for r in requirements
323 self.svfs.options = dict((r, 1) for r in requirements
324 if r in self.openerreqs)
324 if r in self.openerreqs)
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
326 if chunkcachesize is not None:
326 if chunkcachesize is not None:
327 self.svfs.options['chunkcachesize'] = chunkcachesize
327 self.svfs.options['chunkcachesize'] = chunkcachesize
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
329 if maxchainlen is not None:
329 if maxchainlen is not None:
330 self.svfs.options['maxchainlen'] = maxchainlen
330 self.svfs.options['maxchainlen'] = maxchainlen
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
332 if manifestcachesize is not None:
332 if manifestcachesize is not None:
333 self.svfs.options['manifestcachesize'] = manifestcachesize
333 self.svfs.options['manifestcachesize'] = manifestcachesize
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
335 if usetreemanifest is not None:
335 if usetreemanifest is not None:
336 self.svfs.options['usetreemanifest'] = usetreemanifest
336 self.svfs.options['usetreemanifest'] = usetreemanifest
337
337
338 def _writerequirements(self):
338 def _writerequirements(self):
339 reqfile = self.vfs("requires", "w")
339 reqfile = self.vfs("requires", "w")
340 for r in sorted(self.requirements):
340 for r in sorted(self.requirements):
341 reqfile.write("%s\n" % r)
341 reqfile.write("%s\n" % r)
342 reqfile.close()
342 reqfile.close()
343
343
344 def _checknested(self, path):
344 def _checknested(self, path):
345 """Determine if path is a legal nested repository."""
345 """Determine if path is a legal nested repository."""
346 if not path.startswith(self.root):
346 if not path.startswith(self.root):
347 return False
347 return False
348 subpath = path[len(self.root) + 1:]
348 subpath = path[len(self.root) + 1:]
349 normsubpath = util.pconvert(subpath)
349 normsubpath = util.pconvert(subpath)
350
350
351 # XXX: Checking against the current working copy is wrong in
351 # XXX: Checking against the current working copy is wrong in
352 # the sense that it can reject things like
352 # the sense that it can reject things like
353 #
353 #
354 # $ hg cat -r 10 sub/x.txt
354 # $ hg cat -r 10 sub/x.txt
355 #
355 #
356 # if sub/ is no longer a subrepository in the working copy
356 # if sub/ is no longer a subrepository in the working copy
357 # parent revision.
357 # parent revision.
358 #
358 #
359 # However, it can of course also allow things that would have
359 # However, it can of course also allow things that would have
360 # been rejected before, such as the above cat command if sub/
360 # been rejected before, such as the above cat command if sub/
361 # is a subrepository now, but was a normal directory before.
361 # is a subrepository now, but was a normal directory before.
362 # The old path auditor would have rejected by mistake since it
362 # The old path auditor would have rejected by mistake since it
363 # panics when it sees sub/.hg/.
363 # panics when it sees sub/.hg/.
364 #
364 #
365 # All in all, checking against the working copy seems sensible
365 # All in all, checking against the working copy seems sensible
366 # since we want to prevent access to nested repositories on
366 # since we want to prevent access to nested repositories on
367 # the filesystem *now*.
367 # the filesystem *now*.
368 ctx = self[None]
368 ctx = self[None]
369 parts = util.splitpath(subpath)
369 parts = util.splitpath(subpath)
370 while parts:
370 while parts:
371 prefix = '/'.join(parts)
371 prefix = '/'.join(parts)
372 if prefix in ctx.substate:
372 if prefix in ctx.substate:
373 if prefix == normsubpath:
373 if prefix == normsubpath:
374 return True
374 return True
375 else:
375 else:
376 sub = ctx.sub(prefix)
376 sub = ctx.sub(prefix)
377 return sub.checknested(subpath[len(prefix) + 1:])
377 return sub.checknested(subpath[len(prefix) + 1:])
378 else:
378 else:
379 parts.pop()
379 parts.pop()
380 return False
380 return False
381
381
382 def peer(self):
382 def peer(self):
383 return localpeer(self) # not cached to avoid reference cycle
383 return localpeer(self) # not cached to avoid reference cycle
384
384
385 def unfiltered(self):
385 def unfiltered(self):
386 """Return unfiltered version of the repository
386 """Return unfiltered version of the repository
387
387
388 Intended to be overwritten by filtered repo."""
388 Intended to be overwritten by filtered repo."""
389 return self
389 return self
390
390
391 def filtered(self, name):
391 def filtered(self, name):
392 """Return a filtered version of a repository"""
392 """Return a filtered version of a repository"""
393 # build a new class with the mixin and the current class
393 # build a new class with the mixin and the current class
394 # (possibly subclass of the repo)
394 # (possibly subclass of the repo)
395 class proxycls(repoview.repoview, self.unfiltered().__class__):
395 class proxycls(repoview.repoview, self.unfiltered().__class__):
396 pass
396 pass
397 return proxycls(self, name)
397 return proxycls(self, name)
398
398
399 @repofilecache('bookmarks')
399 @repofilecache('bookmarks')
400 def _bookmarks(self):
400 def _bookmarks(self):
401 return bookmarks.bmstore(self)
401 return bookmarks.bmstore(self)
402
402
403 @repofilecache('bookmarks.current')
403 @repofilecache('bookmarks.current')
404 def _bookmarkcurrent(self):
404 def _bookmarkcurrent(self):
405 return bookmarks.readcurrent(self)
405 return bookmarks.readcurrent(self)
406
406
407 def bookmarkheads(self, bookmark):
407 def bookmarkheads(self, bookmark):
408 name = bookmark.split('@', 1)[0]
408 name = bookmark.split('@', 1)[0]
409 heads = []
409 heads = []
410 for mark, n in self._bookmarks.iteritems():
410 for mark, n in self._bookmarks.iteritems():
411 if mark.split('@', 1)[0] == name:
411 if mark.split('@', 1)[0] == name:
412 heads.append(n)
412 heads.append(n)
413 return heads
413 return heads
414
414
415 @storecache('phaseroots')
415 @storecache('phaseroots')
416 def _phasecache(self):
416 def _phasecache(self):
417 return phases.phasecache(self, self._phasedefaults)
417 return phases.phasecache(self, self._phasedefaults)
418
418
419 @storecache('obsstore')
419 @storecache('obsstore')
420 def obsstore(self):
420 def obsstore(self):
421 # read default format for new obsstore.
421 # read default format for new obsstore.
422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
423 # rely on obsstore class default when possible.
423 # rely on obsstore class default when possible.
424 kwargs = {}
424 kwargs = {}
425 if defaultformat is not None:
425 if defaultformat is not None:
426 kwargs['defaultformat'] = defaultformat
426 kwargs['defaultformat'] = defaultformat
427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
428 store = obsolete.obsstore(self.svfs, readonly=readonly,
428 store = obsolete.obsstore(self.svfs, readonly=readonly,
429 **kwargs)
429 **kwargs)
430 if store and readonly:
430 if store and readonly:
431 self.ui.warn(
431 self.ui.warn(
432 _('obsolete feature not enabled but %i markers found!\n')
432 _('obsolete feature not enabled but %i markers found!\n')
433 % len(list(store)))
433 % len(list(store)))
434 return store
434 return store
435
435
436 @storecache('00changelog.i')
436 @storecache('00changelog.i')
437 def changelog(self):
437 def changelog(self):
438 c = changelog.changelog(self.svfs)
438 c = changelog.changelog(self.svfs)
439 if 'HG_PENDING' in os.environ:
439 if 'HG_PENDING' in os.environ:
440 p = os.environ['HG_PENDING']
440 p = os.environ['HG_PENDING']
441 if p.startswith(self.root):
441 if p.startswith(self.root):
442 c.readpending('00changelog.i.a')
442 c.readpending('00changelog.i.a')
443 return c
443 return c
444
444
445 @storecache('00manifest.i')
445 @storecache('00manifest.i')
446 def manifest(self):
446 def manifest(self):
447 return manifest.manifest(self.svfs)
447 return manifest.manifest(self.svfs)
448
448
449 @repofilecache('dirstate')
449 @repofilecache('dirstate')
450 def dirstate(self):
450 def dirstate(self):
451 warned = [0]
451 warned = [0]
452 def validate(node):
452 def validate(node):
453 try:
453 try:
454 self.changelog.rev(node)
454 self.changelog.rev(node)
455 return node
455 return node
456 except error.LookupError:
456 except error.LookupError:
457 if not warned[0]:
457 if not warned[0]:
458 warned[0] = True
458 warned[0] = True
459 self.ui.warn(_("warning: ignoring unknown"
459 self.ui.warn(_("warning: ignoring unknown"
460 " working parent %s!\n") % short(node))
460 " working parent %s!\n") % short(node))
461 return nullid
461 return nullid
462
462
463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
464
464
465 def __getitem__(self, changeid):
465 def __getitem__(self, changeid):
466 if changeid is None:
466 if changeid is None:
467 return context.workingctx(self)
467 return context.workingctx(self)
468 if isinstance(changeid, slice):
468 if isinstance(changeid, slice):
469 return [context.changectx(self, i)
469 return [context.changectx(self, i)
470 for i in xrange(*changeid.indices(len(self)))
470 for i in xrange(*changeid.indices(len(self)))
471 if i not in self.changelog.filteredrevs]
471 if i not in self.changelog.filteredrevs]
472 return context.changectx(self, changeid)
472 return context.changectx(self, changeid)
473
473
474 def __contains__(self, changeid):
474 def __contains__(self, changeid):
475 try:
475 try:
476 self[changeid]
476 self[changeid]
477 return True
477 return True
478 except error.RepoLookupError:
478 except error.RepoLookupError:
479 return False
479 return False
480
480
481 def __nonzero__(self):
481 def __nonzero__(self):
482 return True
482 return True
483
483
484 def __len__(self):
484 def __len__(self):
485 return len(self.changelog)
485 return len(self.changelog)
486
486
487 def __iter__(self):
487 def __iter__(self):
488 return iter(self.changelog)
488 return iter(self.changelog)
489
489
490 def revs(self, expr, *args):
490 def revs(self, expr, *args):
491 '''Return a list of revisions matching the given revset'''
491 '''Return a list of revisions matching the given revset'''
492 expr = revset.formatspec(expr, *args)
492 expr = revset.formatspec(expr, *args)
493 m = revset.match(None, expr)
493 m = revset.match(None, expr)
494 return m(self)
494 return m(self)
495
495
496 def set(self, expr, *args):
496 def set(self, expr, *args):
497 '''
497 '''
498 Yield a context for each matching revision, after doing arg
498 Yield a context for each matching revision, after doing arg
499 replacement via revset.formatspec
499 replacement via revset.formatspec
500 '''
500 '''
501 for r in self.revs(expr, *args):
501 for r in self.revs(expr, *args):
502 yield self[r]
502 yield self[r]
503
503
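# --- illustrative usage note (editor's sketch, not part of localrepo.py) ---
# revs() and set() evaluate a revset after formatspec-style argument
# substitution ('%d' for an integer revision and '%s' for a string are the
# placeholders assumed here), e.g.:
#
#     for rev in repo.revs('%d:: and not %d::', base, other):
#         ...
#     for ctx in repo.set('branch(%s) and head()', 'default'):
#         ui.write('%s\n' % ctx.hex())
#
# ---------------------------------------------------------------------------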
504 def url(self):
504 def url(self):
505 return 'file:' + self.root
505 return 'file:' + self.root
506
506
507 def hook(self, name, throw=False, **args):
507 def hook(self, name, throw=False, **args):
508 """Call a hook, passing this repo instance.
508 """Call a hook, passing this repo instance.
509
509
510 This is a convenience method to aid invoking hooks. Extensions likely
510 This is a convenience method to aid invoking hooks. Extensions likely
511 won't call this unless they have registered a custom hook or are
511 won't call this unless they have registered a custom hook or are
512 replacing code that is expected to call a hook.
512 replacing code that is expected to call a hook.
513 """
513 """
514 return hook.hook(self.ui, self, name, throw, **args)
514 return hook.hook(self.ui, self, name, throw, **args)
515
515
516 @unfilteredmethod
516 @unfilteredmethod
517 def _tag(self, names, node, message, local, user, date, extra={},
517 def _tag(self, names, node, message, local, user, date, extra={},
518 editor=False):
518 editor=False):
519 if isinstance(names, str):
519 if isinstance(names, str):
520 names = (names,)
520 names = (names,)
521
521
522 branches = self.branchmap()
522 branches = self.branchmap()
523 for name in names:
523 for name in names:
524 self.hook('pretag', throw=True, node=hex(node), tag=name,
524 self.hook('pretag', throw=True, node=hex(node), tag=name,
525 local=local)
525 local=local)
526 if name in branches:
526 if name in branches:
527 self.ui.warn(_("warning: tag %s conflicts with existing"
527 self.ui.warn(_("warning: tag %s conflicts with existing"
528 " branch name\n") % name)
528 " branch name\n") % name)
529
529
530 def writetags(fp, names, munge, prevtags):
530 def writetags(fp, names, munge, prevtags):
531 fp.seek(0, 2)
531 fp.seek(0, 2)
532 if prevtags and prevtags[-1] != '\n':
532 if prevtags and prevtags[-1] != '\n':
533 fp.write('\n')
533 fp.write('\n')
534 for name in names:
534 for name in names:
535 if munge:
535 if munge:
536 m = munge(name)
536 m = munge(name)
537 else:
537 else:
538 m = name
538 m = name
539
539
540 if (self._tagscache.tagtypes and
540 if (self._tagscache.tagtypes and
541 name in self._tagscache.tagtypes):
541 name in self._tagscache.tagtypes):
542 old = self.tags().get(name, nullid)
542 old = self.tags().get(name, nullid)
543 fp.write('%s %s\n' % (hex(old), m))
543 fp.write('%s %s\n' % (hex(old), m))
544 fp.write('%s %s\n' % (hex(node), m))
544 fp.write('%s %s\n' % (hex(node), m))
545 fp.close()
545 fp.close()
546
546
547 prevtags = ''
547 prevtags = ''
548 if local:
548 if local:
549 try:
549 try:
550 fp = self.vfs('localtags', 'r+')
550 fp = self.vfs('localtags', 'r+')
551 except IOError:
551 except IOError:
552 fp = self.vfs('localtags', 'a')
552 fp = self.vfs('localtags', 'a')
553 else:
553 else:
554 prevtags = fp.read()
554 prevtags = fp.read()
555
555
556 # local tags are stored in the current charset
556 # local tags are stored in the current charset
557 writetags(fp, names, None, prevtags)
557 writetags(fp, names, None, prevtags)
558 for name in names:
558 for name in names:
559 self.hook('tag', node=hex(node), tag=name, local=local)
559 self.hook('tag', node=hex(node), tag=name, local=local)
560 return
560 return
561
561
562 try:
562 try:
563 fp = self.wfile('.hgtags', 'rb+')
563 fp = self.wfile('.hgtags', 'rb+')
564 except IOError, e:
564 except IOError, e:
565 if e.errno != errno.ENOENT:
565 if e.errno != errno.ENOENT:
566 raise
566 raise
567 fp = self.wfile('.hgtags', 'ab')
567 fp = self.wfile('.hgtags', 'ab')
568 else:
568 else:
569 prevtags = fp.read()
569 prevtags = fp.read()
570
570
571 # committed tags are stored in UTF-8
571 # committed tags are stored in UTF-8
572 writetags(fp, names, encoding.fromlocal, prevtags)
572 writetags(fp, names, encoding.fromlocal, prevtags)
573
573
574 fp.close()
574 fp.close()
575
575
576 self.invalidatecaches()
576 self.invalidatecaches()
577
577
578 if '.hgtags' not in self.dirstate:
578 if '.hgtags' not in self.dirstate:
579 self[None].add(['.hgtags'])
579 self[None].add(['.hgtags'])
580
580
581 m = matchmod.exact(self.root, '', ['.hgtags'])
581 m = matchmod.exact(self.root, '', ['.hgtags'])
582 tagnode = self.commit(message, user, date, extra=extra, match=m,
582 tagnode = self.commit(message, user, date, extra=extra, match=m,
583 editor=editor)
583 editor=editor)
584
584
585 for name in names:
585 for name in names:
586 self.hook('tag', node=hex(node), tag=name, local=local)
586 self.hook('tag', node=hex(node), tag=name, local=local)
587
587
588 return tagnode
588 return tagnode
589
589
590 def tag(self, names, node, message, local, user, date, editor=False):
590 def tag(self, names, node, message, local, user, date, editor=False):
591 '''tag a revision with one or more symbolic names.
591 '''tag a revision with one or more symbolic names.
592
592
593 names is a list of strings or, when adding a single tag, names may be a
593 names is a list of strings or, when adding a single tag, names may be a
594 string.
594 string.
595
595
596 if local is True, the tags are stored in a per-repository file.
596 if local is True, the tags are stored in a per-repository file.
597 otherwise, they are stored in the .hgtags file, and a new
597 otherwise, they are stored in the .hgtags file, and a new
598 changeset is committed with the change.
598 changeset is committed with the change.
599
599
600 keyword arguments:
600 keyword arguments:
601
601
602 local: whether to store tags in non-version-controlled file
602 local: whether to store tags in non-version-controlled file
603 (default False)
603 (default False)
604
604
605 message: commit message to use if committing
605 message: commit message to use if committing
606
606
607 user: name of user to use if committing
607 user: name of user to use if committing
608
608
609 date: date tuple to use if committing'''
609 date: date tuple to use if committing'''
610
610
611 if not local:
611 if not local:
612 m = matchmod.exact(self.root, '', ['.hgtags'])
612 m = matchmod.exact(self.root, '', ['.hgtags'])
613 if util.any(self.status(match=m, unknown=True, ignored=True)):
613 if util.any(self.status(match=m, unknown=True, ignored=True)):
614 raise util.Abort(_('working copy of .hgtags is changed'),
614 raise util.Abort(_('working copy of .hgtags is changed'),
615 hint=_('please commit .hgtags manually'))
615 hint=_('please commit .hgtags manually'))
616
616
617 self.tags() # instantiate the cache
617 self.tags() # instantiate the cache
618 self._tag(names, node, message, local, user, date, editor=editor)
618 self._tag(names, node, message, local, user, date, editor=editor)
619
619
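# --- illustrative usage note (editor's sketch, not part of localrepo.py) ---
# A typical call, matching the signature documented above (the values are
# made-up examples):
#
#     repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
#              'Jane Doe <jane@example.com>', None)
#
# With local=False this commits a new changeset touching .hgtags; with
# local=True it only appends to .hg/localtags.
# ---------------------------------------------------------------------------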
620 @filteredpropertycache
620 @filteredpropertycache
621 def _tagscache(self):
621 def _tagscache(self):
622 '''Returns a tagscache object that contains various tags related
622 '''Returns a tagscache object that contains various tags related
623 caches.'''
623 caches.'''
624
624
625 # This simplifies its cache management by having one decorated
625 # This simplifies its cache management by having one decorated
626 # function (this one) and the rest simply fetch things from it.
626 # function (this one) and the rest simply fetch things from it.
627 class tagscache(object):
627 class tagscache(object):
628 def __init__(self):
628 def __init__(self):
629 # These two define the set of tags for this repository. tags
629 # These two define the set of tags for this repository. tags
630 # maps tag name to node; tagtypes maps tag name to 'global' or
630 # maps tag name to node; tagtypes maps tag name to 'global' or
631 # 'local'. (Global tags are defined by .hgtags across all
631 # 'local'. (Global tags are defined by .hgtags across all
632 # heads, and local tags are defined in .hg/localtags.)
632 # heads, and local tags are defined in .hg/localtags.)
633 # They constitute the in-memory cache of tags.
633 # They constitute the in-memory cache of tags.
634 self.tags = self.tagtypes = None
634 self.tags = self.tagtypes = None
635
635
636 self.nodetagscache = self.tagslist = None
636 self.nodetagscache = self.tagslist = None
637
637
638 cache = tagscache()
638 cache = tagscache()
639 cache.tags, cache.tagtypes = self._findtags()
639 cache.tags, cache.tagtypes = self._findtags()
640
640
641 return cache
641 return cache
642
642
643 def tags(self):
643 def tags(self):
644 '''return a mapping of tag to node'''
644 '''return a mapping of tag to node'''
645 t = {}
645 t = {}
646 if self.changelog.filteredrevs:
646 if self.changelog.filteredrevs:
647 tags, tt = self._findtags()
647 tags, tt = self._findtags()
648 else:
648 else:
649 tags = self._tagscache.tags
649 tags = self._tagscache.tags
650 for k, v in tags.iteritems():
650 for k, v in tags.iteritems():
651 try:
651 try:
652 # ignore tags to unknown nodes
652 # ignore tags to unknown nodes
653 self.changelog.rev(v)
653 self.changelog.rev(v)
654 t[k] = v
654 t[k] = v
655 except (error.LookupError, ValueError):
655 except (error.LookupError, ValueError):
656 pass
656 pass
657 return t
657 return t
658
658
659 def _findtags(self):
659 def _findtags(self):
660 '''Do the hard work of finding tags. Return a pair of dicts
660 '''Do the hard work of finding tags. Return a pair of dicts
661 (tags, tagtypes) where tags maps tag name to node, and tagtypes
661 (tags, tagtypes) where tags maps tag name to node, and tagtypes
662 maps tag name to a string like \'global\' or \'local\'.
662 maps tag name to a string like \'global\' or \'local\'.
663 Subclasses or extensions are free to add their own tags, but
663 Subclasses or extensions are free to add their own tags, but
664 should be aware that the returned dicts will be retained for the
664 should be aware that the returned dicts will be retained for the
665 duration of the localrepo object.'''
665 duration of the localrepo object.'''
666
666
667 # XXX what tagtype should subclasses/extensions use? Currently
667 # XXX what tagtype should subclasses/extensions use? Currently
668 # mq and bookmarks add tags, but do not set the tagtype at all.
668 # mq and bookmarks add tags, but do not set the tagtype at all.
669 # Should each extension invent its own tag type? Should there
669 # Should each extension invent its own tag type? Should there
670 # be one tagtype for all such "virtual" tags? Or is the status
670 # be one tagtype for all such "virtual" tags? Or is the status
671 # quo fine?
671 # quo fine?
672
672
673 alltags = {} # map tag name to (node, hist)
673 alltags = {} # map tag name to (node, hist)
674 tagtypes = {}
674 tagtypes = {}
675
675
676 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
676 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
677 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
677 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
678
678
679 # Build the return dicts. Have to re-encode tag names because
679 # Build the return dicts. Have to re-encode tag names because
680 # the tags module always uses UTF-8 (in order not to lose info
680 # the tags module always uses UTF-8 (in order not to lose info
681 # writing to the cache), but the rest of Mercurial wants them in
681 # writing to the cache), but the rest of Mercurial wants them in
682 # local encoding.
682 # local encoding.
683 tags = {}
683 tags = {}
684 for (name, (node, hist)) in alltags.iteritems():
684 for (name, (node, hist)) in alltags.iteritems():
685 if node != nullid:
685 if node != nullid:
686 tags[encoding.tolocal(name)] = node
686 tags[encoding.tolocal(name)] = node
687 tags['tip'] = self.changelog.tip()
687 tags['tip'] = self.changelog.tip()
688 tagtypes = dict([(encoding.tolocal(name), value)
688 tagtypes = dict([(encoding.tolocal(name), value)
689 for (name, value) in tagtypes.iteritems()])
689 for (name, value) in tagtypes.iteritems()])
690 return (tags, tagtypes)
690 return (tags, tagtypes)
691
691
692 def tagtype(self, tagname):
692 def tagtype(self, tagname):
693 '''
693 '''
694 return the type of the given tag. result can be:
694 return the type of the given tag. result can be:
695
695
696 'local' : a local tag
696 'local' : a local tag
697 'global' : a global tag
697 'global' : a global tag
698 None : tag does not exist
698 None : tag does not exist
699 '''
699 '''
700
700
701 return self._tagscache.tagtypes.get(tagname)
701 return self._tagscache.tagtypes.get(tagname)
702
702
703 def tagslist(self):
703 def tagslist(self):
704 '''return a list of tags ordered by revision'''
704 '''return a list of tags ordered by revision'''
705 if not self._tagscache.tagslist:
705 if not self._tagscache.tagslist:
706 l = []
706 l = []
707 for t, n in self.tags().iteritems():
707 for t, n in self.tags().iteritems():
708 l.append((self.changelog.rev(n), t, n))
708 l.append((self.changelog.rev(n), t, n))
709 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
709 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
710
710
711 return self._tagscache.tagslist
711 return self._tagscache.tagslist
712
712
713 def nodetags(self, node):
713 def nodetags(self, node):
714 '''return the tags associated with a node'''
714 '''return the tags associated with a node'''
715 if not self._tagscache.nodetagscache:
715 if not self._tagscache.nodetagscache:
716 nodetagscache = {}
716 nodetagscache = {}
717 for t, n in self._tagscache.tags.iteritems():
717 for t, n in self._tagscache.tags.iteritems():
718 nodetagscache.setdefault(n, []).append(t)
718 nodetagscache.setdefault(n, []).append(t)
719 for tags in nodetagscache.itervalues():
719 for tags in nodetagscache.itervalues():
720 tags.sort()
720 tags.sort()
721 self._tagscache.nodetagscache = nodetagscache
721 self._tagscache.nodetagscache = nodetagscache
722 return self._tagscache.nodetagscache.get(node, [])
722 return self._tagscache.nodetagscache.get(node, [])
723
723
724 def nodebookmarks(self, node):
724 def nodebookmarks(self, node):
725 marks = []
725 marks = []
726 for bookmark, n in self._bookmarks.iteritems():
726 for bookmark, n in self._bookmarks.iteritems():
727 if n == node:
727 if n == node:
728 marks.append(bookmark)
728 marks.append(bookmark)
729 return sorted(marks)
729 return sorted(marks)
730
730
731 def branchmap(self):
731 def branchmap(self):
732 '''returns a dictionary {branch: [branchheads]} with branchheads
732 '''returns a dictionary {branch: [branchheads]} with branchheads
733 ordered by increasing revision number'''
733 ordered by increasing revision number'''
734 branchmap.updatecache(self)
734 branchmap.updatecache(self)
735 return self._branchcaches[self.filtername]
735 return self._branchcaches[self.filtername]
736
736
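# --- illustrative usage note (editor's sketch, not part of localrepo.py) ---
# Because the head lists are ordered by increasing revision number, the
# last element is the tipmost head of a branch, e.g.:
#
#     tipmost = repo.branchmap()['default'][-1]
#     tiprev = repo[tipmost].rev()
#
# ---------------------------------------------------------------------------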
737 @unfilteredmethod
737 @unfilteredmethod
738 def revbranchcache(self):
738 def revbranchcache(self):
739 if not self._revbranchcache:
739 if not self._revbranchcache:
740 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
740 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
741 return self._revbranchcache
741 return self._revbranchcache
742
742
743 def branchtip(self, branch, ignoremissing=False):
743 def branchtip(self, branch, ignoremissing=False):
744 '''return the tip node for a given branch
744 '''return the tip node for a given branch
745
745
746 If ignoremissing is True, then this method will not raise an error.
746 If ignoremissing is True, then this method will not raise an error.
747 This is helpful for callers that only expect None for a missing branch
747 This is helpful for callers that only expect None for a missing branch
748 (e.g. namespace).
748 (e.g. namespace).
749
749
750 '''
750 '''
751 try:
751 try:
752 return self.branchmap().branchtip(branch)
752 return self.branchmap().branchtip(branch)
753 except KeyError:
753 except KeyError:
754 if not ignoremissing:
754 if not ignoremissing:
755 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
755 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
756 else:
756 else:
757 pass
757 pass
758
758
759 def lookup(self, key):
759 def lookup(self, key):
760 return self[key].node()
760 return self[key].node()
761
761
762 def lookupbranch(self, key, remote=None):
762 def lookupbranch(self, key, remote=None):
763 repo = remote or self
763 repo = remote or self
764 if key in repo.branchmap():
764 if key in repo.branchmap():
765 return key
765 return key
766
766
767 repo = (remote and remote.local()) and remote or self
767 repo = (remote and remote.local()) and remote or self
768 return repo[key].branch()
768 return repo[key].branch()
769
769
770 def known(self, nodes):
770 def known(self, nodes):
771 nm = self.changelog.nodemap
771 nm = self.changelog.nodemap
772 pc = self._phasecache
772 pc = self._phasecache
773 result = []
773 result = []
774 for n in nodes:
774 for n in nodes:
775 r = nm.get(n)
775 r = nm.get(n)
776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
777 result.append(resp)
777 result.append(resp)
778 return result
778 return result
779
779
780 def local(self):
780 def local(self):
781 return self
781 return self
782
782
783 def cancopy(self):
783 def cancopy(self):
784 # so statichttprepo's override of local() works
784 # so statichttprepo's override of local() works
785 if not self.local():
785 if not self.local():
786 return False
786 return False
787 if not self.ui.configbool('phases', 'publish', True):
787 if not self.ui.configbool('phases', 'publish', True):
788 return True
788 return True
789 # if publishing we can't copy if there is filtered content
789 # if publishing we can't copy if there is filtered content
790 return not self.filtered('visible').changelog.filteredrevs
790 return not self.filtered('visible').changelog.filteredrevs
791
791
792 def shared(self):
792 def shared(self):
793 '''the type of shared repository (None if not shared)'''
793 '''the type of shared repository (None if not shared)'''
794 if self.sharedpath != self.path:
794 if self.sharedpath != self.path:
795 return 'store'
795 return 'store'
796 return None
796 return None
797
797
798 def join(self, f, *insidef):
798 def join(self, f, *insidef):
799 return self.vfs.join(os.path.join(f, *insidef))
799 return self.vfs.join(os.path.join(f, *insidef))
800
800
801 def wjoin(self, f, *insidef):
801 def wjoin(self, f, *insidef):
802 return self.vfs.reljoin(self.root, f, *insidef)
802 return self.vfs.reljoin(self.root, f, *insidef)
803
803
804 def file(self, f):
804 def file(self, f):
805 if f[0] == '/':
805 if f[0] == '/':
806 f = f[1:]
806 f = f[1:]
807 return filelog.filelog(self.svfs, f)
807 return filelog.filelog(self.svfs, f)
808
808
809 def changectx(self, changeid):
809 def changectx(self, changeid):
810 return self[changeid]
810 return self[changeid]
811
811
812 def parents(self, changeid=None):
812 def parents(self, changeid=None):
813 '''get list of changectxs for parents of changeid'''
813 '''get list of changectxs for parents of changeid'''
814 return self[changeid].parents()
814 return self[changeid].parents()
815
815
816 def setparents(self, p1, p2=nullid):
816 def setparents(self, p1, p2=nullid):
817 self.dirstate.beginparentchange()
817 self.dirstate.beginparentchange()
818 copies = self.dirstate.setparents(p1, p2)
818 copies = self.dirstate.setparents(p1, p2)
819 pctx = self[p1]
819 pctx = self[p1]
820 if copies:
820 if copies:
821 # Adjust copy records, the dirstate cannot do it, it
821 # Adjust copy records, the dirstate cannot do it, it
822 # requires access to parents manifests. Preserve them
822 # requires access to parents manifests. Preserve them
823 # only for entries added to first parent.
823 # only for entries added to first parent.
824 for f in copies:
824 for f in copies:
825 if f not in pctx and copies[f] in pctx:
825 if f not in pctx and copies[f] in pctx:
826 self.dirstate.copy(copies[f], f)
826 self.dirstate.copy(copies[f], f)
827 if p2 == nullid:
827 if p2 == nullid:
828 for f, s in sorted(self.dirstate.copies().items()):
828 for f, s in sorted(self.dirstate.copies().items()):
829 if f not in pctx and s not in pctx:
829 if f not in pctx and s not in pctx:
830 self.dirstate.copy(None, f)
830 self.dirstate.copy(None, f)
831 self.dirstate.endparentchange()
831 self.dirstate.endparentchange()
832
832
833 def filectx(self, path, changeid=None, fileid=None):
833 def filectx(self, path, changeid=None, fileid=None):
834 """changeid can be a changeset revision, node, or tag.
834 """changeid can be a changeset revision, node, or tag.
835 fileid can be a file revision or node."""
835 fileid can be a file revision or node."""
836 return context.filectx(self, path, changeid, fileid)
836 return context.filectx(self, path, changeid, fileid)
837
837
838 def getcwd(self):
838 def getcwd(self):
839 return self.dirstate.getcwd()
839 return self.dirstate.getcwd()
840
840
841 def pathto(self, f, cwd=None):
841 def pathto(self, f, cwd=None):
842 return self.dirstate.pathto(f, cwd)
842 return self.dirstate.pathto(f, cwd)
843
843
844 def wfile(self, f, mode='r'):
844 def wfile(self, f, mode='r'):
845 return self.wvfs(f, mode)
845 return self.wvfs(f, mode)
846
846
847 def _link(self, f):
847 def _link(self, f):
848 return self.wvfs.islink(f)
848 return self.wvfs.islink(f)
849
849
850 def _loadfilter(self, filter):
850 def _loadfilter(self, filter):
851 if filter not in self.filterpats:
851 if filter not in self.filterpats:
852 l = []
852 l = []
853 for pat, cmd in self.ui.configitems(filter):
853 for pat, cmd in self.ui.configitems(filter):
854 if cmd == '!':
854 if cmd == '!':
855 continue
855 continue
856 mf = matchmod.match(self.root, '', [pat])
856 mf = matchmod.match(self.root, '', [pat])
857 fn = None
857 fn = None
858 params = cmd
858 params = cmd
859 for name, filterfn in self._datafilters.iteritems():
859 for name, filterfn in self._datafilters.iteritems():
860 if cmd.startswith(name):
860 if cmd.startswith(name):
861 fn = filterfn
861 fn = filterfn
862 params = cmd[len(name):].lstrip()
862 params = cmd[len(name):].lstrip()
863 break
863 break
864 if not fn:
864 if not fn:
865 fn = lambda s, c, **kwargs: util.filter(s, c)
865 fn = lambda s, c, **kwargs: util.filter(s, c)
866 # Wrap old filters not supporting keyword arguments
866 # Wrap old filters not supporting keyword arguments
867 if not inspect.getargspec(fn)[2]:
867 if not inspect.getargspec(fn)[2]:
868 oldfn = fn
868 oldfn = fn
869 fn = lambda s, c, **kwargs: oldfn(s, c)
869 fn = lambda s, c, **kwargs: oldfn(s, c)
870 l.append((mf, fn, params))
870 l.append((mf, fn, params))
871 self.filterpats[filter] = l
871 self.filterpats[filter] = l
872 return self.filterpats[filter]
872 return self.filterpats[filter]
873
873
874 def _filter(self, filterpats, filename, data):
874 def _filter(self, filterpats, filename, data):
875 for mf, fn, cmd in filterpats:
875 for mf, fn, cmd in filterpats:
876 if mf(filename):
876 if mf(filename):
877 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
877 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
878 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
878 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
879 break
879 break
880
880
881 return data
881 return data
882
882
883 @unfilteredpropertycache
883 @unfilteredpropertycache
884 def _encodefilterpats(self):
884 def _encodefilterpats(self):
885 return self._loadfilter('encode')
885 return self._loadfilter('encode')
886
886
887 @unfilteredpropertycache
887 @unfilteredpropertycache
888 def _decodefilterpats(self):
888 def _decodefilterpats(self):
889 return self._loadfilter('decode')
889 return self._loadfilter('decode')
890
890
891 def adddatafilter(self, name, filter):
891 def adddatafilter(self, name, filter):
892 self._datafilters[name] = filter
892 self._datafilters[name] = filter
893
893
894 def wread(self, filename):
894 def wread(self, filename):
895 if self._link(filename):
895 if self._link(filename):
896 data = self.wvfs.readlink(filename)
896 data = self.wvfs.readlink(filename)
897 else:
897 else:
898 data = self.wvfs.read(filename)
898 data = self.wvfs.read(filename)
899 return self._filter(self._encodefilterpats, filename, data)
899 return self._filter(self._encodefilterpats, filename, data)
900
900
901 def wwrite(self, filename, data, flags):
901 def wwrite(self, filename, data, flags):
902 data = self._filter(self._decodefilterpats, filename, data)
902 data = self._filter(self._decodefilterpats, filename, data)
903 if 'l' in flags:
903 if 'l' in flags:
904 self.wvfs.symlink(data, filename)
904 self.wvfs.symlink(data, filename)
905 else:
905 else:
906 self.wvfs.write(filename, data)
906 self.wvfs.write(filename, data)
907 if 'x' in flags:
907 if 'x' in flags:
908 self.wvfs.setflags(filename, False, True)
908 self.wvfs.setflags(filename, False, True)
909
909
910 def wwritedata(self, filename, data):
910 def wwritedata(self, filename, data):
911 return self._filter(self._decodefilterpats, filename, data)
911 return self._filter(self._decodefilterpats, filename, data)
912
912
913 def currenttransaction(self):
913 def currenttransaction(self):
914 """return the current transaction or None if non exists"""
914 """return the current transaction or None if non exists"""
915 if self._transref:
915 if self._transref:
916 tr = self._transref()
916 tr = self._transref()
917 else:
917 else:
918 tr = None
918 tr = None
919
919
920 if tr and tr.running():
920 if tr and tr.running():
921 return tr
921 return tr
922 return None
922 return None
923
923
924 def transaction(self, desc, report=None):
924 def transaction(self, desc, report=None):
925 if (self.ui.configbool('devel', 'all')
925 if (self.ui.configbool('devel', 'all')
926 or self.ui.configbool('devel', 'check-locks')):
926 or self.ui.configbool('devel', 'check-locks')):
927 l = self._lockref and self._lockref()
927 l = self._lockref and self._lockref()
928 if l is None or not l.held:
928 if l is None or not l.held:
929 msg = 'transaction with no lock\n'
929 msg = 'transaction with no lock\n'
930 if self.ui.tracebackflag:
930 if self.ui.tracebackflag:
931 util.debugstacktrace(msg, 1)
931 util.debugstacktrace(msg, 1)
932 else:
932 else:
933 self.ui.write_err(msg)
933 self.ui.write_err(msg)
934 tr = self.currenttransaction()
934 tr = self.currenttransaction()
935 if tr is not None:
935 if tr is not None:
936 return tr.nest()
936 return tr.nest()
937
937
938 # abort here if the journal already exists
938 # abort here if the journal already exists
939 if self.svfs.exists("journal"):
939 if self.svfs.exists("journal"):
940 raise error.RepoError(
940 raise error.RepoError(
941 _("abandoned transaction found"),
941 _("abandoned transaction found"),
942 hint=_("run 'hg recover' to clean up transaction"))
942 hint=_("run 'hg recover' to clean up transaction"))
943
943
944 self.hook('pretxnopen', throw=True, txnname=desc)
944 self.hook('pretxnopen', throw=True, txnname=desc)
945
945
946 self._writejournal(desc)
946 self._writejournal(desc)
947 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
947 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
948 if report:
948 if report:
949 rp = report
949 rp = report
950 else:
950 else:
951 rp = self.ui.warn
951 rp = self.ui.warn
952 vfsmap = {'plain': self.vfs} # root of .hg/
952 vfsmap = {'plain': self.vfs} # root of .hg/
953 # we must avoid cyclic reference between repo and transaction.
953 # we must avoid cyclic reference between repo and transaction.
954 reporef = weakref.ref(self)
954 reporef = weakref.ref(self)
955 def validate(tr):
955 def validate(tr):
956 """will run pre-closing hooks"""
956 """will run pre-closing hooks"""
957 pending = lambda: tr.writepending() and self.root or ""
957 pending = lambda: tr.writepending() and self.root or ""
958 reporef().hook('pretxnclose', throw=True, pending=pending,
958 reporef().hook('pretxnclose', throw=True, pending=pending,
959 txnname=desc, **tr.hookargs)
959 txnname=desc, **tr.hookargs)
960
960
961 tr = transaction.transaction(rp, self.sopener, vfsmap,
961 tr = transaction.transaction(rp, self.sopener, vfsmap,
962 "journal",
962 "journal",
963 "undo",
963 "undo",
964 aftertrans(renames),
964 aftertrans(renames),
965 self.store.createmode,
965 self.store.createmode,
966 validator=validate)
966 validator=validate)
967
967
968 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
968 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
969 tr.hookargs['TXNID'] = trid
969 tr.hookargs['TXNID'] = trid
970 # note: writing the fncache only during finalize means that the file is
970 # note: writing the fncache only during finalize means that the file is
971 # outdated when running hooks. As fncache is used for streaming clone,
971 # outdated when running hooks. As fncache is used for streaming clone,
972 # this is not expected to break anything that happens during the hooks.
972 # this is not expected to break anything that happens during the hooks.
973 tr.addfinalize('flush-fncache', self.store.write)
973 tr.addfinalize('flush-fncache', self.store.write)
974 def txnclosehook(tr2):
974 def txnclosehook(tr2):
975 """To be run if transaction is successful, will schedule a hook run
975 """To be run if transaction is successful, will schedule a hook run
976 """
976 """
977 def hook():
977 def hook():
978 reporef().hook('txnclose', throw=False, txnname=desc,
978 reporef().hook('txnclose', throw=False, txnname=desc,
979 **tr2.hookargs)
979 **tr2.hookargs)
980 reporef()._afterlock(hook)
980 reporef()._afterlock(hook)
981 tr.addfinalize('txnclose-hook', txnclosehook)
981 tr.addfinalize('txnclose-hook', txnclosehook)
982 self._transref = weakref.ref(tr)
982 self._transref = weakref.ref(tr)
983 return tr
983 return tr
984
984
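# Illustrative sketch of the calling convention around transaction(): it is
# opened while the store lock is held (otherwise the devel 'check-locks'
# warning above fires), closed on success and released unconditionally;
# commitctx() and stream_in() below follow this pattern.  The transaction
# name 'my-operation' is made up.
#
#   lock = repo.lock()
#   try:
#       tr = repo.transaction('my-operation')
#       try:
#           ...                 # write to the store
#           tr.close()
#       finally:
#           tr.release()
#   finally:
#       lock.release()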
985 def _journalfiles(self):
985 def _journalfiles(self):
986 return ((self.svfs, 'journal'),
986 return ((self.svfs, 'journal'),
987 (self.vfs, 'journal.dirstate'),
987 (self.vfs, 'journal.dirstate'),
988 (self.vfs, 'journal.branch'),
988 (self.vfs, 'journal.branch'),
989 (self.vfs, 'journal.desc'),
989 (self.vfs, 'journal.desc'),
990 (self.vfs, 'journal.bookmarks'),
990 (self.vfs, 'journal.bookmarks'),
991 (self.svfs, 'journal.phaseroots'))
991 (self.svfs, 'journal.phaseroots'))
992
992
993 def undofiles(self):
993 def undofiles(self):
994 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
994 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
995
995
996 def _writejournal(self, desc):
996 def _writejournal(self, desc):
997 self.vfs.write("journal.dirstate",
997 self.vfs.write("journal.dirstate",
998 self.vfs.tryread("dirstate"))
998 self.vfs.tryread("dirstate"))
999 self.vfs.write("journal.branch",
999 self.vfs.write("journal.branch",
1000 encoding.fromlocal(self.dirstate.branch()))
1000 encoding.fromlocal(self.dirstate.branch()))
1001 self.vfs.write("journal.desc",
1001 self.vfs.write("journal.desc",
1002 "%d\n%s\n" % (len(self), desc))
1002 "%d\n%s\n" % (len(self), desc))
1003 self.vfs.write("journal.bookmarks",
1003 self.vfs.write("journal.bookmarks",
1004 self.vfs.tryread("bookmarks"))
1004 self.vfs.tryread("bookmarks"))
1005 self.svfs.write("journal.phaseroots",
1005 self.svfs.write("journal.phaseroots",
1006 self.svfs.tryread("phaseroots"))
1006 self.svfs.tryread("phaseroots"))
1007
1007
1008 def recover(self):
1008 def recover(self):
1009 lock = self.lock()
1009 lock = self.lock()
1010 try:
1010 try:
1011 if self.svfs.exists("journal"):
1011 if self.svfs.exists("journal"):
1012 self.ui.status(_("rolling back interrupted transaction\n"))
1012 self.ui.status(_("rolling back interrupted transaction\n"))
1013 vfsmap = {'': self.svfs,
1013 vfsmap = {'': self.svfs,
1014 'plain': self.vfs,}
1014 'plain': self.vfs,}
1015 transaction.rollback(self.svfs, vfsmap, "journal",
1015 transaction.rollback(self.svfs, vfsmap, "journal",
1016 self.ui.warn)
1016 self.ui.warn)
1017 self.invalidate()
1017 self.invalidate()
1018 return True
1018 return True
1019 else:
1019 else:
1020 self.ui.warn(_("no interrupted transaction available\n"))
1020 self.ui.warn(_("no interrupted transaction available\n"))
1021 return False
1021 return False
1022 finally:
1022 finally:
1023 lock.release()
1023 lock.release()
1024
1024
1025 def rollback(self, dryrun=False, force=False):
1025 def rollback(self, dryrun=False, force=False):
1026 wlock = lock = None
1026 wlock = lock = None
1027 try:
1027 try:
1028 wlock = self.wlock()
1028 wlock = self.wlock()
1029 lock = self.lock()
1029 lock = self.lock()
1030 if self.svfs.exists("undo"):
1030 if self.svfs.exists("undo"):
1031 return self._rollback(dryrun, force)
1031 return self._rollback(dryrun, force)
1032 else:
1032 else:
1033 self.ui.warn(_("no rollback information available\n"))
1033 self.ui.warn(_("no rollback information available\n"))
1034 return 1
1034 return 1
1035 finally:
1035 finally:
1036 release(lock, wlock)
1036 release(lock, wlock)
1037
1037
1038 @unfilteredmethod # Until we get smarter cache management
1038 @unfilteredmethod # Until we get smarter cache management
1039 def _rollback(self, dryrun, force):
1039 def _rollback(self, dryrun, force):
1040 ui = self.ui
1040 ui = self.ui
1041 try:
1041 try:
1042 args = self.vfs.read('undo.desc').splitlines()
1042 args = self.vfs.read('undo.desc').splitlines()
1043 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1043 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1044 if len(args) >= 3:
1044 if len(args) >= 3:
1045 detail = args[2]
1045 detail = args[2]
1046 oldtip = oldlen - 1
1046 oldtip = oldlen - 1
1047
1047
1048 if detail and ui.verbose:
1048 if detail and ui.verbose:
1049 msg = (_('repository tip rolled back to revision %s'
1049 msg = (_('repository tip rolled back to revision %s'
1050 ' (undo %s: %s)\n')
1050 ' (undo %s: %s)\n')
1051 % (oldtip, desc, detail))
1051 % (oldtip, desc, detail))
1052 else:
1052 else:
1053 msg = (_('repository tip rolled back to revision %s'
1053 msg = (_('repository tip rolled back to revision %s'
1054 ' (undo %s)\n')
1054 ' (undo %s)\n')
1055 % (oldtip, desc))
1055 % (oldtip, desc))
1056 except IOError:
1056 except IOError:
1057 msg = _('rolling back unknown transaction\n')
1057 msg = _('rolling back unknown transaction\n')
1058 desc = None
1058 desc = None
1059
1059
1060 if not force and self['.'] != self['tip'] and desc == 'commit':
1060 if not force and self['.'] != self['tip'] and desc == 'commit':
1061 raise util.Abort(
1061 raise util.Abort(
1062 _('rollback of last commit while not checked out '
1062 _('rollback of last commit while not checked out '
1063 'may lose data'), hint=_('use -f to force'))
1063 'may lose data'), hint=_('use -f to force'))
1064
1064
1065 ui.status(msg)
1065 ui.status(msg)
1066 if dryrun:
1066 if dryrun:
1067 return 0
1067 return 0
1068
1068
1069 parents = self.dirstate.parents()
1069 parents = self.dirstate.parents()
1070 self.destroying()
1070 self.destroying()
1071 vfsmap = {'plain': self.vfs, '': self.svfs}
1071 vfsmap = {'plain': self.vfs, '': self.svfs}
1072 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1072 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1073 if self.vfs.exists('undo.bookmarks'):
1073 if self.vfs.exists('undo.bookmarks'):
1074 self.vfs.rename('undo.bookmarks', 'bookmarks')
1074 self.vfs.rename('undo.bookmarks', 'bookmarks')
1075 if self.svfs.exists('undo.phaseroots'):
1075 if self.svfs.exists('undo.phaseroots'):
1076 self.svfs.rename('undo.phaseroots', 'phaseroots')
1076 self.svfs.rename('undo.phaseroots', 'phaseroots')
1077 self.invalidate()
1077 self.invalidate()
1078
1078
1079 parentgone = (parents[0] not in self.changelog.nodemap or
1079 parentgone = (parents[0] not in self.changelog.nodemap or
1080 parents[1] not in self.changelog.nodemap)
1080 parents[1] not in self.changelog.nodemap)
1081 if parentgone:
1081 if parentgone:
1082 self.vfs.rename('undo.dirstate', 'dirstate')
1082 self.vfs.rename('undo.dirstate', 'dirstate')
1083 try:
1083 try:
1084 branch = self.vfs.read('undo.branch')
1084 branch = self.vfs.read('undo.branch')
1085 self.dirstate.setbranch(encoding.tolocal(branch))
1085 self.dirstate.setbranch(encoding.tolocal(branch))
1086 except IOError:
1086 except IOError:
1087 ui.warn(_('named branch could not be reset: '
1087 ui.warn(_('named branch could not be reset: '
1088 'current branch is still \'%s\'\n')
1088 'current branch is still \'%s\'\n')
1089 % self.dirstate.branch())
1089 % self.dirstate.branch())
1090
1090
1091 self.dirstate.invalidate()
1091 self.dirstate.invalidate()
1092 parents = tuple([p.rev() for p in self.parents()])
1092 parents = tuple([p.rev() for p in self.parents()])
1093 if len(parents) > 1:
1093 if len(parents) > 1:
1094 ui.status(_('working directory now based on '
1094 ui.status(_('working directory now based on '
1095 'revisions %d and %d\n') % parents)
1095 'revisions %d and %d\n') % parents)
1096 else:
1096 else:
1097 ui.status(_('working directory now based on '
1097 ui.status(_('working directory now based on '
1098 'revision %d\n') % parents)
1098 'revision %d\n') % parents)
1099 # TODO: if we know which new heads may result from this rollback, pass
1099 # TODO: if we know which new heads may result from this rollback, pass
1100 # them to destroy(), which will prevent the branchhead cache from being
1100 # them to destroy(), which will prevent the branchhead cache from being
1101 # invalidated.
1101 # invalidated.
1102 self.destroyed()
1102 self.destroyed()
1103 return 0
1103 return 0
1104
1104
1105 def invalidatecaches(self):
1105 def invalidatecaches(self):
1106
1106
1107 if '_tagscache' in vars(self):
1107 if '_tagscache' in vars(self):
1108 # can't use delattr on proxy
1108 # can't use delattr on proxy
1109 del self.__dict__['_tagscache']
1109 del self.__dict__['_tagscache']
1110
1110
1111 self.unfiltered()._branchcaches.clear()
1111 self.unfiltered()._branchcaches.clear()
1112 self.invalidatevolatilesets()
1112 self.invalidatevolatilesets()
1113
1113
1114 def invalidatevolatilesets(self):
1114 def invalidatevolatilesets(self):
1115 self.filteredrevcache.clear()
1115 self.filteredrevcache.clear()
1116 obsolete.clearobscaches(self)
1116 obsolete.clearobscaches(self)
1117
1117
1118 def invalidatedirstate(self):
1118 def invalidatedirstate(self):
1119 '''Invalidates the dirstate, causing the next call to dirstate
1119 '''Invalidates the dirstate, causing the next call to dirstate
1120 to check if it was modified since the last time it was read,
1120 to check if it was modified since the last time it was read,
1121 rereading it if it has.
1121 rereading it if it has.
1122
1122
1123 This is different from dirstate.invalidate() in that it doesn't always
1123 This is different from dirstate.invalidate() in that it doesn't always
1124 reread the dirstate. Use dirstate.invalidate() if you want to
1124 reread the dirstate. Use dirstate.invalidate() if you want to
1125 explicitly read the dirstate again (i.e. restoring it to a previous
1125 explicitly read the dirstate again (i.e. restoring it to a previous
1126 known good state).'''
1126 known good state).'''
1127 if hasunfilteredcache(self, 'dirstate'):
1127 if hasunfilteredcache(self, 'dirstate'):
1128 for k in self.dirstate._filecache:
1128 for k in self.dirstate._filecache:
1129 try:
1129 try:
1130 delattr(self.dirstate, k)
1130 delattr(self.dirstate, k)
1131 except AttributeError:
1131 except AttributeError:
1132 pass
1132 pass
1133 delattr(self.unfiltered(), 'dirstate')
1133 delattr(self.unfiltered(), 'dirstate')
1134
1134
1135 def invalidate(self):
1135 def invalidate(self):
1136 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1136 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1137 for k in self._filecache:
1137 for k in self._filecache:
1138 # dirstate is invalidated separately in invalidatedirstate()
1138 # dirstate is invalidated separately in invalidatedirstate()
1139 if k == 'dirstate':
1139 if k == 'dirstate':
1140 continue
1140 continue
1141
1141
1142 try:
1142 try:
1143 delattr(unfiltered, k)
1143 delattr(unfiltered, k)
1144 except AttributeError:
1144 except AttributeError:
1145 pass
1145 pass
1146 self.invalidatecaches()
1146 self.invalidatecaches()
1147 self.store.invalidatecaches()
1147 self.store.invalidatecaches()
1148
1148
1149 def invalidateall(self):
1149 def invalidateall(self):
1150 '''Fully invalidates both store and non-store parts, causing the
1150 '''Fully invalidates both store and non-store parts, causing the
1151 subsequent operation to reread any outside changes.'''
1151 subsequent operation to reread any outside changes.'''
1152 # extension should hook this to invalidate its caches
1152 # extension should hook this to invalidate its caches
1153 self.invalidate()
1153 self.invalidate()
1154 self.invalidatedirstate()
1154 self.invalidatedirstate()
1155
1155
1156 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1156 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1157 try:
1157 try:
1158 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1158 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1159 except error.LockHeld, inst:
1159 except error.LockHeld, inst:
1160 if not wait:
1160 if not wait:
1161 raise
1161 raise
1162 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1162 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1163 (desc, inst.locker))
1163 (desc, inst.locker))
1164 # default to 600 seconds timeout
1164 # default to 600 seconds timeout
1165 l = lockmod.lock(vfs, lockname,
1165 l = lockmod.lock(vfs, lockname,
1166 int(self.ui.config("ui", "timeout", "600")),
1166 int(self.ui.config("ui", "timeout", "600")),
1167 releasefn, desc=desc)
1167 releasefn, desc=desc)
1168 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1168 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1169 if acquirefn:
1169 if acquirefn:
1170 acquirefn()
1170 acquirefn()
1171 return l
1171 return l
1172
1172
1173 def _afterlock(self, callback):
1173 def _afterlock(self, callback):
1174 """add a callback to the current repository lock.
1174 """add a callback to the current repository lock.
1175
1175
1176 The callback will be executed on lock release."""
1176 The callback will be executed on lock release."""
1177 l = self._lockref and self._lockref()
1177 l = self._lockref and self._lockref()
1178 if l:
1178 if l:
1179 l.postrelease.append(callback)
1179 l.postrelease.append(callback)
1180 else:
1180 else:
1181 callback()
1181 callback()
1182
1182
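# Illustrative sketch: commit() below uses _afterlock() to defer its 'commit'
# hook until the store lock is released.  A hypothetical caller:
#
#   def announce():
#       repo.ui.status('store lock released\n')
#   repo._afterlock(announce)   # runs on lock release, or right away if no lock is held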
1183 def lock(self, wait=True):
1183 def lock(self, wait=True):
1184 '''Lock the repository store (.hg/store) and return a weak reference
1184 '''Lock the repository store (.hg/store) and return a weak reference
1185 to the lock. Use this before modifying the store (e.g. committing or
1185 to the lock. Use this before modifying the store (e.g. committing or
1186 stripping). If you are opening a transaction, get a lock as well.'''
1186 stripping). If you are opening a transaction, get a lock as well.'''
1187 l = self._lockref and self._lockref()
1187 l = self._lockref and self._lockref()
1188 if l is not None and l.held:
1188 if l is not None and l.held:
1189 l.lock()
1189 l.lock()
1190 return l
1190 return l
1191
1191
1192 def unlock():
1192 def unlock():
1193 for k, ce in self._filecache.items():
1193 for k, ce in self._filecache.items():
1194 if k == 'dirstate' or k not in self.__dict__:
1194 if k == 'dirstate' or k not in self.__dict__:
1195 continue
1195 continue
1196 ce.refresh()
1196 ce.refresh()
1197
1197
1198 l = self._lock(self.svfs, "lock", wait, unlock,
1198 l = self._lock(self.svfs, "lock", wait, unlock,
1199 self.invalidate, _('repository %s') % self.origroot)
1199 self.invalidate, _('repository %s') % self.origroot)
1200 self._lockref = weakref.ref(l)
1200 self._lockref = weakref.ref(l)
1201 return l
1201 return l
1202
1202
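# Illustrative sketch: with wait=False, _lock() re-raises error.LockHeld
# instead of retrying for up to ui.timeout seconds (600 by default).
#
#   try:
#       l = repo.lock(wait=False)
#   except error.LockHeld:
#       ...                     # .hg/store/lock is held by someone else
#   else:
#       try:
#           ...                 # modify the store
#       finally:
#           l.release()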
1203 def wlock(self, wait=True):
1203 def wlock(self, wait=True):
1204 '''Lock the non-store parts of the repository (everything under
1204 '''Lock the non-store parts of the repository (everything under
1205 .hg except .hg/store) and return a weak reference to the lock.
1205 .hg except .hg/store) and return a weak reference to the lock.
1206 Use this before modifying files in .hg.'''
1206 Use this before modifying files in .hg.'''
1207 l = self._wlockref and self._wlockref()
1208 if l is not None and l.held:
1209 l.lock()
1210 return l
1211
1207 if (self.ui.configbool('devel', 'all')
1212 if (self.ui.configbool('devel', 'all')
1208 or self.ui.configbool('devel', 'check-locks')):
1213 or self.ui.configbool('devel', 'check-locks')):
1209 l = self._lockref and self._lockref()
1214 l = self._lockref and self._lockref()
1210 if l is not None and l.held:
1215 if l is not None and l.held:
1211 msg = '"lock" taken before "wlock"\n'
1216 msg = '"lock" taken before "wlock"\n'
1212 if self.ui.tracebackflag:
1217 if self.ui.tracebackflag:
1213 util.debugstacktrace(msg, 1)
1218 util.debugstacktrace(msg, 1)
1214 else:
1219 else:
1215 self.ui.write_err(msg)
1220 self.ui.write_err(msg)
1216 l = self._wlockref and self._wlockref()
1217 if l is not None and l.held:
1218 l.lock()
1219 return l
1220
1221
1221 def unlock():
1222 def unlock():
1222 if self.dirstate.pendingparentchange():
1223 if self.dirstate.pendingparentchange():
1223 self.dirstate.invalidate()
1224 self.dirstate.invalidate()
1224 else:
1225 else:
1225 self.dirstate.write()
1226 self.dirstate.write()
1226
1227
1227 self._filecache['dirstate'].refresh()
1228 self._filecache['dirstate'].refresh()
1228
1229
1229 l = self._lock(self.vfs, "wlock", wait, unlock,
1230 l = self._lock(self.vfs, "wlock", wait, unlock,
1230 self.invalidatedirstate, _('working directory of %s') %
1231 self.invalidatedirstate, _('working directory of %s') %
1231 self.origroot)
1232 self.origroot)
1232 self._wlockref = weakref.ref(l)
1233 self._wlockref = weakref.ref(l)
1233 return l
1234 return l
1234
1235
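# Illustrative sketch: when both locks are needed, wlock is taken before the
# store lock (rollback() above does exactly this); the reverse order triggers
# the devel 'check-locks' warning emitted in wlock().
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       ...                     # touch both the working copy and the store
#   finally:
#       release(lock, wlock)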
1235 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1236 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1236 """
1237 """
1237 commit an individual file as part of a larger transaction
1238 commit an individual file as part of a larger transaction
1238 """
1239 """
1239
1240
1240 fname = fctx.path()
1241 fname = fctx.path()
1241 fparent1 = manifest1.get(fname, nullid)
1242 fparent1 = manifest1.get(fname, nullid)
1242 fparent2 = manifest2.get(fname, nullid)
1243 fparent2 = manifest2.get(fname, nullid)
1243 if isinstance(fctx, context.filectx):
1244 if isinstance(fctx, context.filectx):
1244 node = fctx.filenode()
1245 node = fctx.filenode()
1245 if node in [fparent1, fparent2]:
1246 if node in [fparent1, fparent2]:
1246 self.ui.debug('reusing %s filelog entry\n' % fname)
1247 self.ui.debug('reusing %s filelog entry\n' % fname)
1247 return node
1248 return node
1248
1249
1249 flog = self.file(fname)
1250 flog = self.file(fname)
1250 meta = {}
1251 meta = {}
1251 copy = fctx.renamed()
1252 copy = fctx.renamed()
1252 if copy and copy[0] != fname:
1253 if copy and copy[0] != fname:
1253 # Mark the new revision of this file as a copy of another
1254 # Mark the new revision of this file as a copy of another
1254 # file. This copy data will effectively act as a parent
1255 # file. This copy data will effectively act as a parent
1255 # of this new revision. If this is a merge, the first
1256 # of this new revision. If this is a merge, the first
1256 # parent will be the nullid (meaning "look up the copy data")
1257 # parent will be the nullid (meaning "look up the copy data")
1257 # and the second one will be the other parent. For example:
1258 # and the second one will be the other parent. For example:
1258 #
1259 #
1259 # 0 --- 1 --- 3 rev1 changes file foo
1260 # 0 --- 1 --- 3 rev1 changes file foo
1260 # \ / rev2 renames foo to bar and changes it
1261 # \ / rev2 renames foo to bar and changes it
1261 # \- 2 -/ rev3 should have bar with all changes and
1262 # \- 2 -/ rev3 should have bar with all changes and
1262 # should record that bar descends from
1263 # should record that bar descends from
1263 # bar in rev2 and foo in rev1
1264 # bar in rev2 and foo in rev1
1264 #
1265 #
1265 # this allows this merge to succeed:
1266 # this allows this merge to succeed:
1266 #
1267 #
1267 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1268 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1268 # \ / merging rev3 and rev4 should use bar@rev2
1269 # \ / merging rev3 and rev4 should use bar@rev2
1269 # \- 2 --- 4 as the merge base
1270 # \- 2 --- 4 as the merge base
1270 #
1271 #
1271
1272
1272 cfname = copy[0]
1273 cfname = copy[0]
1273 crev = manifest1.get(cfname)
1274 crev = manifest1.get(cfname)
1274 newfparent = fparent2
1275 newfparent = fparent2
1275
1276
1276 if manifest2: # branch merge
1277 if manifest2: # branch merge
1277 if fparent2 == nullid or crev is None: # copied on remote side
1278 if fparent2 == nullid or crev is None: # copied on remote side
1278 if cfname in manifest2:
1279 if cfname in manifest2:
1279 crev = manifest2[cfname]
1280 crev = manifest2[cfname]
1280 newfparent = fparent1
1281 newfparent = fparent1
1281
1282
1282 # Here, we used to search backwards through history to try to find
1283 # Here, we used to search backwards through history to try to find
1283 # where the file copy came from if the source of a copy was not in
1284 # where the file copy came from if the source of a copy was not in
1284 # the parent directory. However, this doesn't actually make sense to
1285 # the parent directory. However, this doesn't actually make sense to
1285 # do (what does a copy from something not in your working copy even
1286 # do (what does a copy from something not in your working copy even
1286 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1287 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1287 # the user that copy information was dropped, so if they didn't
1288 # the user that copy information was dropped, so if they didn't
1288 # expect this outcome it can be fixed, but this is the correct
1289 # expect this outcome it can be fixed, but this is the correct
1289 # behavior in this circumstance.
1290 # behavior in this circumstance.
1290
1291
1291 if crev:
1292 if crev:
1292 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1293 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1293 meta["copy"] = cfname
1294 meta["copy"] = cfname
1294 meta["copyrev"] = hex(crev)
1295 meta["copyrev"] = hex(crev)
1295 fparent1, fparent2 = nullid, newfparent
1296 fparent1, fparent2 = nullid, newfparent
1296 else:
1297 else:
1297 self.ui.warn(_("warning: can't find ancestor for '%s' "
1298 self.ui.warn(_("warning: can't find ancestor for '%s' "
1298 "copied from '%s'!\n") % (fname, cfname))
1299 "copied from '%s'!\n") % (fname, cfname))
1299
1300
1300 elif fparent1 == nullid:
1301 elif fparent1 == nullid:
1301 fparent1, fparent2 = fparent2, nullid
1302 fparent1, fparent2 = fparent2, nullid
1302 elif fparent2 != nullid:
1303 elif fparent2 != nullid:
1303 # is one parent an ancestor of the other?
1304 # is one parent an ancestor of the other?
1304 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1305 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1305 if fparent1 in fparentancestors:
1306 if fparent1 in fparentancestors:
1306 fparent1, fparent2 = fparent2, nullid
1307 fparent1, fparent2 = fparent2, nullid
1307 elif fparent2 in fparentancestors:
1308 elif fparent2 in fparentancestors:
1308 fparent2 = nullid
1309 fparent2 = nullid
1309
1310
1310 # is the file changed?
1311 # is the file changed?
1311 text = fctx.data()
1312 text = fctx.data()
1312 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1313 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1313 changelist.append(fname)
1314 changelist.append(fname)
1314 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1315 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1315 # are just the flags changed during merge?
1316 # are just the flags changed during merge?
1316 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1317 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1317 changelist.append(fname)
1318 changelist.append(fname)
1318
1319
1319 return fparent1
1320 return fparent1
1320
1321
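# Illustrative sketch of the commit() entry point defined below: it gathers
# working-directory status (optionally narrowed by a match object) and
# returns the new changeset node, or None when there is nothing to commit.
# The file path and user below are made up.
#
#   m = matchmod.match(repo.root, '', ['src/module.py'])
#   node = repo.commit(text='fix a bug', user='someone@example.com', match=m)
#   if node is None:
#       repo.ui.status('nothing changed\n')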
1321 @unfilteredmethod
1322 @unfilteredmethod
1322 def commit(self, text="", user=None, date=None, match=None, force=False,
1323 def commit(self, text="", user=None, date=None, match=None, force=False,
1323 editor=False, extra={}):
1324 editor=False, extra={}):
1324 """Add a new revision to current repository.
1325 """Add a new revision to current repository.
1325
1326
1326 Revision information is gathered from the working directory,
1327 Revision information is gathered from the working directory,
1327 match can be used to filter the committed files. If editor is
1328 match can be used to filter the committed files. If editor is
1328 supplied, it is called to get a commit message.
1329 supplied, it is called to get a commit message.
1329 """
1330 """
1330
1331
1331 def fail(f, msg):
1332 def fail(f, msg):
1332 raise util.Abort('%s: %s' % (f, msg))
1333 raise util.Abort('%s: %s' % (f, msg))
1333
1334
1334 if not match:
1335 if not match:
1335 match = matchmod.always(self.root, '')
1336 match = matchmod.always(self.root, '')
1336
1337
1337 if not force:
1338 if not force:
1338 vdirs = []
1339 vdirs = []
1339 match.explicitdir = vdirs.append
1340 match.explicitdir = vdirs.append
1340 match.bad = fail
1341 match.bad = fail
1341
1342
1342 wlock = self.wlock()
1343 wlock = self.wlock()
1343 try:
1344 try:
1344 wctx = self[None]
1345 wctx = self[None]
1345 merge = len(wctx.parents()) > 1
1346 merge = len(wctx.parents()) > 1
1346
1347
1347 if not force and merge and not match.always():
1348 if not force and merge and not match.always():
1348 raise util.Abort(_('cannot partially commit a merge '
1349 raise util.Abort(_('cannot partially commit a merge '
1349 '(do not specify files or patterns)'))
1350 '(do not specify files or patterns)'))
1350
1351
1351 status = self.status(match=match, clean=force)
1352 status = self.status(match=match, clean=force)
1352 if force:
1353 if force:
1353 status.modified.extend(status.clean) # mq may commit clean files
1354 status.modified.extend(status.clean) # mq may commit clean files
1354
1355
1355 # check subrepos
1356 # check subrepos
1356 subs = []
1357 subs = []
1357 commitsubs = set()
1358 commitsubs = set()
1358 newstate = wctx.substate.copy()
1359 newstate = wctx.substate.copy()
1359 # only manage subrepos and .hgsubstate if .hgsub is present
1360 # only manage subrepos and .hgsubstate if .hgsub is present
1360 if '.hgsub' in wctx:
1361 if '.hgsub' in wctx:
1361 # we'll decide whether to track this ourselves, thanks
1362 # we'll decide whether to track this ourselves, thanks
1362 for c in status.modified, status.added, status.removed:
1363 for c in status.modified, status.added, status.removed:
1363 if '.hgsubstate' in c:
1364 if '.hgsubstate' in c:
1364 c.remove('.hgsubstate')
1365 c.remove('.hgsubstate')
1365
1366
1366 # compare current state to last committed state
1367 # compare current state to last committed state
1367 # build new substate based on last committed state
1368 # build new substate based on last committed state
1368 oldstate = wctx.p1().substate
1369 oldstate = wctx.p1().substate
1369 for s in sorted(newstate.keys()):
1370 for s in sorted(newstate.keys()):
1370 if not match(s):
1371 if not match(s):
1371 # ignore working copy, use old state if present
1372 # ignore working copy, use old state if present
1372 if s in oldstate:
1373 if s in oldstate:
1373 newstate[s] = oldstate[s]
1374 newstate[s] = oldstate[s]
1374 continue
1375 continue
1375 if not force:
1376 if not force:
1376 raise util.Abort(
1377 raise util.Abort(
1377 _("commit with new subrepo %s excluded") % s)
1378 _("commit with new subrepo %s excluded") % s)
1378 dirtyreason = wctx.sub(s).dirtyreason(True)
1379 dirtyreason = wctx.sub(s).dirtyreason(True)
1379 if dirtyreason:
1380 if dirtyreason:
1380 if not self.ui.configbool('ui', 'commitsubrepos'):
1381 if not self.ui.configbool('ui', 'commitsubrepos'):
1381 raise util.Abort(dirtyreason,
1382 raise util.Abort(dirtyreason,
1382 hint=_("use --subrepos for recursive commit"))
1383 hint=_("use --subrepos for recursive commit"))
1383 subs.append(s)
1384 subs.append(s)
1384 commitsubs.add(s)
1385 commitsubs.add(s)
1385 else:
1386 else:
1386 bs = wctx.sub(s).basestate()
1387 bs = wctx.sub(s).basestate()
1387 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1388 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1388 if oldstate.get(s, (None, None, None))[1] != bs:
1389 if oldstate.get(s, (None, None, None))[1] != bs:
1389 subs.append(s)
1390 subs.append(s)
1390
1391
1391 # check for removed subrepos
1392 # check for removed subrepos
1392 for p in wctx.parents():
1393 for p in wctx.parents():
1393 r = [s for s in p.substate if s not in newstate]
1394 r = [s for s in p.substate if s not in newstate]
1394 subs += [s for s in r if match(s)]
1395 subs += [s for s in r if match(s)]
1395 if subs:
1396 if subs:
1396 if (not match('.hgsub') and
1397 if (not match('.hgsub') and
1397 '.hgsub' in (wctx.modified() + wctx.added())):
1398 '.hgsub' in (wctx.modified() + wctx.added())):
1398 raise util.Abort(
1399 raise util.Abort(
1399 _("can't commit subrepos without .hgsub"))
1400 _("can't commit subrepos without .hgsub"))
1400 status.modified.insert(0, '.hgsubstate')
1401 status.modified.insert(0, '.hgsubstate')
1401
1402
1402 elif '.hgsub' in status.removed:
1403 elif '.hgsub' in status.removed:
1403 # clean up .hgsubstate when .hgsub is removed
1404 # clean up .hgsubstate when .hgsub is removed
1404 if ('.hgsubstate' in wctx and
1405 if ('.hgsubstate' in wctx and
1405 '.hgsubstate' not in (status.modified + status.added +
1406 '.hgsubstate' not in (status.modified + status.added +
1406 status.removed)):
1407 status.removed)):
1407 status.removed.insert(0, '.hgsubstate')
1408 status.removed.insert(0, '.hgsubstate')
1408
1409
1409 # make sure all explicit patterns are matched
1410 # make sure all explicit patterns are matched
1410 if not force and match.files():
1411 if not force and match.files():
1411 matched = set(status.modified + status.added + status.removed)
1412 matched = set(status.modified + status.added + status.removed)
1412
1413
1413 for f in match.files():
1414 for f in match.files():
1414 f = self.dirstate.normalize(f)
1415 f = self.dirstate.normalize(f)
1415 if f == '.' or f in matched or f in wctx.substate:
1416 if f == '.' or f in matched or f in wctx.substate:
1416 continue
1417 continue
1417 if f in status.deleted:
1418 if f in status.deleted:
1418 fail(f, _('file not found!'))
1419 fail(f, _('file not found!'))
1419 if f in vdirs: # visited directory
1420 if f in vdirs: # visited directory
1420 d = f + '/'
1421 d = f + '/'
1421 for mf in matched:
1422 for mf in matched:
1422 if mf.startswith(d):
1423 if mf.startswith(d):
1423 break
1424 break
1424 else:
1425 else:
1425 fail(f, _("no match under directory!"))
1426 fail(f, _("no match under directory!"))
1426 elif f not in self.dirstate:
1427 elif f not in self.dirstate:
1427 fail(f, _("file not tracked!"))
1428 fail(f, _("file not tracked!"))
1428
1429
1429 cctx = context.workingcommitctx(self, status,
1430 cctx = context.workingcommitctx(self, status,
1430 text, user, date, extra)
1431 text, user, date, extra)
1431
1432
1432 if (not force and not extra.get("close") and not merge
1433 if (not force and not extra.get("close") and not merge
1433 and not cctx.files()
1434 and not cctx.files()
1434 and wctx.branch() == wctx.p1().branch()):
1435 and wctx.branch() == wctx.p1().branch()):
1435 return None
1436 return None
1436
1437
1437 if merge and cctx.deleted():
1438 if merge and cctx.deleted():
1438 raise util.Abort(_("cannot commit merge with missing files"))
1439 raise util.Abort(_("cannot commit merge with missing files"))
1439
1440
1440 ms = mergemod.mergestate(self)
1441 ms = mergemod.mergestate(self)
1441 for f in status.modified:
1442 for f in status.modified:
1442 if f in ms and ms[f] == 'u':
1443 if f in ms and ms[f] == 'u':
1443 raise util.Abort(_('unresolved merge conflicts '
1444 raise util.Abort(_('unresolved merge conflicts '
1444 '(see "hg help resolve")'))
1445 '(see "hg help resolve")'))
1445
1446
1446 if editor:
1447 if editor:
1447 cctx._text = editor(self, cctx, subs)
1448 cctx._text = editor(self, cctx, subs)
1448 edited = (text != cctx._text)
1449 edited = (text != cctx._text)
1449
1450
1450 # Save commit message in case this transaction gets rolled back
1451 # Save commit message in case this transaction gets rolled back
1451 # (e.g. by a pretxncommit hook). Leave the content alone on
1452 # (e.g. by a pretxncommit hook). Leave the content alone on
1452 # the assumption that the user will use the same editor again.
1453 # the assumption that the user will use the same editor again.
1453 msgfn = self.savecommitmessage(cctx._text)
1454 msgfn = self.savecommitmessage(cctx._text)
1454
1455
1455 # commit subs and write new state
1456 # commit subs and write new state
1456 if subs:
1457 if subs:
1457 for s in sorted(commitsubs):
1458 for s in sorted(commitsubs):
1458 sub = wctx.sub(s)
1459 sub = wctx.sub(s)
1459 self.ui.status(_('committing subrepository %s\n') %
1460 self.ui.status(_('committing subrepository %s\n') %
1460 subrepo.subrelpath(sub))
1461 subrepo.subrelpath(sub))
1461 sr = sub.commit(cctx._text, user, date)
1462 sr = sub.commit(cctx._text, user, date)
1462 newstate[s] = (newstate[s][0], sr)
1463 newstate[s] = (newstate[s][0], sr)
1463 subrepo.writestate(self, newstate)
1464 subrepo.writestate(self, newstate)
1464
1465
1465 p1, p2 = self.dirstate.parents()
1466 p1, p2 = self.dirstate.parents()
1466 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1467 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1467 try:
1468 try:
1468 self.hook("precommit", throw=True, parent1=hookp1,
1469 self.hook("precommit", throw=True, parent1=hookp1,
1469 parent2=hookp2)
1470 parent2=hookp2)
1470 ret = self.commitctx(cctx, True)
1471 ret = self.commitctx(cctx, True)
1471 except: # re-raises
1472 except: # re-raises
1472 if edited:
1473 if edited:
1473 self.ui.write(
1474 self.ui.write(
1474 _('note: commit message saved in %s\n') % msgfn)
1475 _('note: commit message saved in %s\n') % msgfn)
1475 raise
1476 raise
1476
1477
1477 # update bookmarks, dirstate and mergestate
1478 # update bookmarks, dirstate and mergestate
1478 bookmarks.update(self, [p1, p2], ret)
1479 bookmarks.update(self, [p1, p2], ret)
1479 cctx.markcommitted(ret)
1480 cctx.markcommitted(ret)
1480 ms.reset()
1481 ms.reset()
1481 finally:
1482 finally:
1482 wlock.release()
1483 wlock.release()
1483
1484
1484 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1485 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1485 # hack for commands that use a temporary commit (e.g. histedit):
1486 # hack for commands that use a temporary commit (e.g. histedit):
1486 # the temporary commit may have been stripped before the hook runs
1487 # the temporary commit may have been stripped before the hook runs
1487 if node in self:
1488 if node in self:
1488 self.hook("commit", node=node, parent1=parent1,
1489 self.hook("commit", node=node, parent1=parent1,
1489 parent2=parent2)
1490 parent2=parent2)
1490 self._afterlock(commithook)
1491 self._afterlock(commithook)
1491 return ret
1492 return ret
1492
1493
1493 @unfilteredmethod
1494 @unfilteredmethod
1494 def commitctx(self, ctx, error=False):
1495 def commitctx(self, ctx, error=False):
1495 """Add a new revision to current repository.
1496 """Add a new revision to current repository.
1496 Revision information is passed via the context argument.
1497 Revision information is passed via the context argument.
1497 """
1498 """
1498
1499
1499 tr = None
1500 tr = None
1500 p1, p2 = ctx.p1(), ctx.p2()
1501 p1, p2 = ctx.p1(), ctx.p2()
1501 user = ctx.user()
1502 user = ctx.user()
1502
1503
1503 lock = self.lock()
1504 lock = self.lock()
1504 try:
1505 try:
1505 tr = self.transaction("commit")
1506 tr = self.transaction("commit")
1506 trp = weakref.proxy(tr)
1507 trp = weakref.proxy(tr)
1507
1508
1508 if ctx.files():
1509 if ctx.files():
1509 m1 = p1.manifest()
1510 m1 = p1.manifest()
1510 m2 = p2.manifest()
1511 m2 = p2.manifest()
1511 m = m1.copy()
1512 m = m1.copy()
1512
1513
1513 # check in files
1514 # check in files
1514 added = []
1515 added = []
1515 changed = []
1516 changed = []
1516 removed = list(ctx.removed())
1517 removed = list(ctx.removed())
1517 linkrev = len(self)
1518 linkrev = len(self)
1518 self.ui.note(_("committing files:\n"))
1519 self.ui.note(_("committing files:\n"))
1519 for f in sorted(ctx.modified() + ctx.added()):
1520 for f in sorted(ctx.modified() + ctx.added()):
1520 self.ui.note(f + "\n")
1521 self.ui.note(f + "\n")
1521 try:
1522 try:
1522 fctx = ctx[f]
1523 fctx = ctx[f]
1523 if fctx is None:
1524 if fctx is None:
1524 removed.append(f)
1525 removed.append(f)
1525 else:
1526 else:
1526 added.append(f)
1527 added.append(f)
1527 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1528 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1528 trp, changed)
1529 trp, changed)
1529 m.setflag(f, fctx.flags())
1530 m.setflag(f, fctx.flags())
1530 except OSError, inst:
1531 except OSError, inst:
1531 self.ui.warn(_("trouble committing %s!\n") % f)
1532 self.ui.warn(_("trouble committing %s!\n") % f)
1532 raise
1533 raise
1533 except IOError, inst:
1534 except IOError, inst:
1534 errcode = getattr(inst, 'errno', errno.ENOENT)
1535 errcode = getattr(inst, 'errno', errno.ENOENT)
1535 if error or errcode and errcode != errno.ENOENT:
1536 if error or errcode and errcode != errno.ENOENT:
1536 self.ui.warn(_("trouble committing %s!\n") % f)
1537 self.ui.warn(_("trouble committing %s!\n") % f)
1537 raise
1538 raise
1538
1539
1539 # update manifest
1540 # update manifest
1540 self.ui.note(_("committing manifest\n"))
1541 self.ui.note(_("committing manifest\n"))
1541 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1542 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1542 drop = [f for f in removed if f in m]
1543 drop = [f for f in removed if f in m]
1543 for f in drop:
1544 for f in drop:
1544 del m[f]
1545 del m[f]
1545 mn = self.manifest.add(m, trp, linkrev,
1546 mn = self.manifest.add(m, trp, linkrev,
1546 p1.manifestnode(), p2.manifestnode(),
1547 p1.manifestnode(), p2.manifestnode(),
1547 added, drop)
1548 added, drop)
1548 files = changed + removed
1549 files = changed + removed
1549 else:
1550 else:
1550 mn = p1.manifestnode()
1551 mn = p1.manifestnode()
1551 files = []
1552 files = []
1552
1553
1553 # update changelog
1554 # update changelog
1554 self.ui.note(_("committing changelog\n"))
1555 self.ui.note(_("committing changelog\n"))
1555 self.changelog.delayupdate(tr)
1556 self.changelog.delayupdate(tr)
1556 n = self.changelog.add(mn, files, ctx.description(),
1557 n = self.changelog.add(mn, files, ctx.description(),
1557 trp, p1.node(), p2.node(),
1558 trp, p1.node(), p2.node(),
1558 user, ctx.date(), ctx.extra().copy())
1559 user, ctx.date(), ctx.extra().copy())
1559 p = lambda: tr.writepending() and self.root or ""
1560 p = lambda: tr.writepending() and self.root or ""
1560 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1561 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1561 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1562 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1562 parent2=xp2, pending=p)
1563 parent2=xp2, pending=p)
1563 # set the new commit to its proper phase
1564 # set the new commit to its proper phase
1564 targetphase = subrepo.newcommitphase(self.ui, ctx)
1565 targetphase = subrepo.newcommitphase(self.ui, ctx)
1565 if targetphase:
1566 if targetphase:
1566 # retracting the boundary does not alter the parent changeset.
1567 # retracting the boundary does not alter the parent changeset.
1567 # if a parent has a higher phase, the resulting phase will
1568 # if a parent has a higher phase, the resulting phase will
1568 # be compliant anyway
1569 # be compliant anyway
1569 #
1570 #
1570 # if minimal phase was 0 we don't need to retract anything
1571 # if minimal phase was 0 we don't need to retract anything
1571 phases.retractboundary(self, tr, targetphase, [n])
1572 phases.retractboundary(self, tr, targetphase, [n])
1572 tr.close()
1573 tr.close()
1573 branchmap.updatecache(self.filtered('served'))
1574 branchmap.updatecache(self.filtered('served'))
1574 return n
1575 return n
1575 finally:
1576 finally:
1576 if tr:
1577 if tr:
1577 tr.release()
1578 tr.release()
1578 lock.release()
1579 lock.release()
1579
1580
1580 @unfilteredmethod
1581 @unfilteredmethod
1581 def destroying(self):
1582 def destroying(self):
1582 '''Inform the repository that nodes are about to be destroyed.
1583 '''Inform the repository that nodes are about to be destroyed.
1583 Intended for use by strip and rollback, so there's a common
1584 Intended for use by strip and rollback, so there's a common
1584 place for anything that has to be done before destroying history.
1585 place for anything that has to be done before destroying history.
1585
1586
1586 This is mostly useful for saving state that is in memory and waiting
1587 This is mostly useful for saving state that is in memory and waiting
1587 to be flushed when the current lock is released. Because a call to
1588 to be flushed when the current lock is released. Because a call to
1588 destroyed is imminent, the repo will be invalidated causing those
1589 destroyed is imminent, the repo will be invalidated causing those
1589 changes to stay in memory (waiting for the next unlock), or vanish
1590 changes to stay in memory (waiting for the next unlock), or vanish
1590 completely.
1591 completely.
1591 '''
1592 '''
1592 # When using the same lock to commit and strip, the phasecache is left
1593 # When using the same lock to commit and strip, the phasecache is left
1593 # dirty after committing. Then when we strip, the repo is invalidated,
1594 # dirty after committing. Then when we strip, the repo is invalidated,
1594 # causing those changes to disappear.
1595 # causing those changes to disappear.
1595 if '_phasecache' in vars(self):
1596 if '_phasecache' in vars(self):
1596 self._phasecache.write()
1597 self._phasecache.write()
1597
1598
1598 @unfilteredmethod
1599 @unfilteredmethod
1599 def destroyed(self):
1600 def destroyed(self):
1600 '''Inform the repository that nodes have been destroyed.
1601 '''Inform the repository that nodes have been destroyed.
1601 Intended for use by strip and rollback, so there's a common
1602 Intended for use by strip and rollback, so there's a common
1602 place for anything that has to be done after destroying history.
1603 place for anything that has to be done after destroying history.
1603 '''
1604 '''
1604 # When one tries to:
1605 # When one tries to:
1605 # 1) destroy nodes thus calling this method (e.g. strip)
1606 # 1) destroy nodes thus calling this method (e.g. strip)
1606 # 2) use phasecache somewhere (e.g. commit)
1607 # 2) use phasecache somewhere (e.g. commit)
1607 #
1608 #
1608 # then 2) will fail because the phasecache contains nodes that were
1609 # then 2) will fail because the phasecache contains nodes that were
1609 # removed. We can either remove phasecache from the filecache,
1610 # removed. We can either remove phasecache from the filecache,
1610 # causing it to reload next time it is accessed, or simply filter
1611 # causing it to reload next time it is accessed, or simply filter
1611 # the removed nodes now and write the updated cache.
1612 # the removed nodes now and write the updated cache.
1612 self._phasecache.filterunknown(self)
1613 self._phasecache.filterunknown(self)
1613 self._phasecache.write()
1614 self._phasecache.write()
1614
1615
1615 # update the 'served' branch cache to help read-only server processes
1616 # update the 'served' branch cache to help read-only server processes
1616 # Thanks to branchcache collaboration this is done from the nearest
1617 # Thanks to branchcache collaboration this is done from the nearest
1617 # filtered subset and it is expected to be fast.
1618 # filtered subset and it is expected to be fast.
1618 branchmap.updatecache(self.filtered('served'))
1619 branchmap.updatecache(self.filtered('served'))
1619
1620
1620 # Ensure the persistent tag cache is updated. Doing it now
1621 # Ensure the persistent tag cache is updated. Doing it now
1621 # means that the tag cache only has to worry about destroyed
1622 # means that the tag cache only has to worry about destroyed
1622 # heads immediately after a strip/rollback. That in turn
1623 # heads immediately after a strip/rollback. That in turn
1623 # guarantees that "cachetip == currenttip" (comparing both rev
1624 # guarantees that "cachetip == currenttip" (comparing both rev
1624 # and node) always means no nodes have been added or destroyed.
1625 # and node) always means no nodes have been added or destroyed.
1625
1626
1626 # XXX this is suboptimal when qrefresh'ing: we strip the current
1627 # XXX this is suboptimal when qrefresh'ing: we strip the current
1627 # head, refresh the tag cache, then immediately add a new head.
1628 # head, refresh the tag cache, then immediately add a new head.
1628 # But I think doing it this way is necessary for the "instant
1629 # But I think doing it this way is necessary for the "instant
1629 # tag cache retrieval" case to work.
1630 # tag cache retrieval" case to work.
1630 self.invalidate()
1631 self.invalidate()
1631
1632
1632 def walk(self, match, node=None):
1633 def walk(self, match, node=None):
1633 '''
1634 '''
1634 walk recursively through the directory tree or a given
1635 walk recursively through the directory tree or a given
1635 changeset, finding all files matched by the match
1636 changeset, finding all files matched by the match
1636 function
1637 function
1637 '''
1638 '''
1638 return self[node].walk(match)
1639 return self[node].walk(match)
1639
1640
1640 def status(self, node1='.', node2=None, match=None,
1641 def status(self, node1='.', node2=None, match=None,
1641 ignored=False, clean=False, unknown=False,
1642 ignored=False, clean=False, unknown=False,
1642 listsubrepos=False):
1643 listsubrepos=False):
1643 '''a convenience method that calls node1.status(node2)'''
1644 '''a convenience method that calls node1.status(node2)'''
1644 return self[node1].status(node2, match, ignored, clean, unknown,
1645 return self[node1].status(node2, match, ignored, clean, unknown,
1645 listsubrepos)
1646 listsubrepos)
1646
1647
1647 def heads(self, start=None):
1648 def heads(self, start=None):
1648 heads = self.changelog.heads(start)
1649 heads = self.changelog.heads(start)
1649 # sort the output in rev descending order
1650 # sort the output in rev descending order
1650 return sorted(heads, key=self.changelog.rev, reverse=True)
1651 return sorted(heads, key=self.changelog.rev, reverse=True)
1651
1652
1652 def branchheads(self, branch=None, start=None, closed=False):
1653 def branchheads(self, branch=None, start=None, closed=False):
1653 '''return a (possibly filtered) list of heads for the given branch
1654 '''return a (possibly filtered) list of heads for the given branch
1654
1655
1655 Heads are returned in topological order, from newest to oldest.
1656 Heads are returned in topological order, from newest to oldest.
1656 If branch is None, use the dirstate branch.
1657 If branch is None, use the dirstate branch.
1657 If start is not None, return only heads reachable from start.
1658 If start is not None, return only heads reachable from start.
1658 If closed is True, return heads that are marked as closed as well.
1659 If closed is True, return heads that are marked as closed as well.
1659 '''
1660 '''
1660 if branch is None:
1661 if branch is None:
1661 branch = self[None].branch()
1662 branch = self[None].branch()
1662 branches = self.branchmap()
1663 branches = self.branchmap()
1663 if branch not in branches:
1664 if branch not in branches:
1664 return []
1665 return []
1665 # the cache returns heads ordered lowest to highest
1666 # the cache returns heads ordered lowest to highest
1666 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1667 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1667 if start is not None:
1668 if start is not None:
1668 # filter out the heads that cannot be reached from startrev
1669 # filter out the heads that cannot be reached from startrev
1669 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1670 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1670 bheads = [h for h in bheads if h in fbheads]
1671 bheads = [h for h in bheads if h in fbheads]
1671 return bheads
1672 return bheads
1672
1673
1673 def branches(self, nodes):
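# Illustrative sketch: branchheads() returns heads newest-first; the branch
# name 'default' is just an example.
#
#   for node in repo.branchheads('default', closed=True):
#       repo.ui.status('%s\n' % hex(node)[:12])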
1674 def branches(self, nodes):
1674 if not nodes:
1675 if not nodes:
1675 nodes = [self.changelog.tip()]
1676 nodes = [self.changelog.tip()]
1676 b = []
1677 b = []
1677 for n in nodes:
1678 for n in nodes:
1678 t = n
1679 t = n
1679 while True:
1680 while True:
1680 p = self.changelog.parents(n)
1681 p = self.changelog.parents(n)
1681 if p[1] != nullid or p[0] == nullid:
1682 if p[1] != nullid or p[0] == nullid:
1682 b.append((t, n, p[0], p[1]))
1683 b.append((t, n, p[0], p[1]))
1683 break
1684 break
1684 n = p[0]
1685 n = p[0]
1685 return b
1686 return b
1686
1687
1687 def between(self, pairs):
1688 def between(self, pairs):
1688 r = []
1689 r = []
1689
1690
1690 for top, bottom in pairs:
1691 for top, bottom in pairs:
1691 n, l, i = top, [], 0
1692 n, l, i = top, [], 0
1692 f = 1
1693 f = 1
1693
1694
1694 while n != bottom and n != nullid:
1695 while n != bottom and n != nullid:
1695 p = self.changelog.parents(n)[0]
1696 p = self.changelog.parents(n)[0]
1696 if i == f:
1697 if i == f:
1697 l.append(n)
1698 l.append(n)
1698 f = f * 2
1699 f = f * 2
1699 n = p
1700 n = p
1700 i += 1
1701 i += 1
1701
1702
1702 r.append(l)
1703 r.append(l)
1703
1704
1704 return r
1705 return r
1705
1706
1706 def checkpush(self, pushop):
1707 def checkpush(self, pushop):
1707 """Extensions can override this function if additional checks have
1708 """Extensions can override this function if additional checks have
1708 to be performed before pushing, or call it if they override push
1709 to be performed before pushing, or call it if they override push
1709 command.
1710 command.
1710 """
1711 """
1711 pass
1712 pass
1712
1713
1713 @unfilteredpropertycache
1714 @unfilteredpropertycache
1714 def prepushoutgoinghooks(self):
1715 def prepushoutgoinghooks(self):
1715 """Return util.hooks consists of "(repo, remote, outgoing)"
1716 """Return util.hooks consists of "(repo, remote, outgoing)"
1716 functions, which are called before pushing changesets.
1717 functions, which are called before pushing changesets.
1717 """
1718 """
1718 return util.hooks()
1719 return util.hooks()
1719
1720
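# Illustrative sketch: an extension would typically register a checker on
# this hooks object before push; the 'add' method is assumed from util.hooks,
# and 'myext', 'checkoutgoing' and 'allowed_to_push' are made-up names.
#
#   def checkoutgoing(repo, remote, outgoing):
#       if not allowed_to_push(repo, remote, outgoing):
#           raise util.Abort(_('push rejected by myext'))
#   repo.prepushoutgoinghooks.add('myext', checkoutgoing)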
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

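The framing consumed above is line-based and simple: one status line (0 = OK, 1 = operation forbidden, 2 = remote locking failed), one 'total_files total_bytes' line, then for each file a 'name\0size' line followed by exactly size bytes of data. A standalone parser sketch of that framing, for illustration only (this is not the code path Mercurial itself uses):

def parse_streamclone(fp):
    """Yield (name, data) pairs from a stream_out()-style stream."""
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('server refused streaming clone (code %d)' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _unused in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))
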
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

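The prepushkey hook above is the standard veto point for pushkey updates (bookmarks, phases, ...). A hedged sketch of an in-process Python hook that could be wired up as prepushkey = python:myhooks.keepbookmarks in an hgrc (the module and function names are illustrative assumptions):

# myhooks.py (hypothetical)
def keepbookmarks(ui, repo, namespace=None, key=None, old=None, new=None,
                  **kwargs):
    if namespace == 'bookmarks' and not new:
        ui.warn('refusing to delete bookmark %s\n' % key)
        return True   # a truthy return value makes a pre-hook fail
    return False
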
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

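For illustration (not in the source): undoname maps a transaction's journal file names onto the corresponding undo names, e.g.:

# undoname('journal.dirstate')  -> 'undo.dirstate'
# undoname('store/journal')     -> 'store/undo'
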
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,67 +1,84 @@

$ cat << EOF > buggylocking.py
> """A small extension that acquires locks in the wrong order
> """
>
> from mercurial import cmdutil
>
> cmdtable = {}
> command = cmdutil.command(cmdtable)
>
> @command('buggylocking', [], '')
> def buggylocking(ui, repo):
>     tr = repo.transaction('buggy')
>     lo = repo.lock()
>     wl = repo.wlock()
>     wl.release()
>     lo.release()
>
> @command('properlocking', [], '')
> def properlocking(ui, repo):
>     """check that reentrance is fine"""
>     wl = repo.wlock()
>     lo = repo.lock()
>     tr = repo.transaction('proper')
>     tr2 = repo.transaction('proper')
>     lo2 = repo.lock()
>     wl2 = repo.wlock()
>     wl2.release()
>     lo2.release()
>     tr2.close()
>     tr.close()
>     lo.release()
>     wl.release()
> EOF

$ cat << EOF >> $HGRCPATH
> [extensions]
> buggylocking=$TESTTMP/buggylocking.py
> [devel]
> all=1
> EOF

$ hg init lock-checker
$ cd lock-checker
$ hg buggylocking
transaction with no lock
"lock" taken before "wlock"
$ cat << EOF >> $HGRCPATH
> [devel]
> all=0
> check-locks=1
> EOF
$ hg buggylocking
transaction with no lock
"lock" taken before "wlock"
$ hg buggylocking --traceback
transaction with no lock
at:
*/hg:* in * (glob)
*/mercurial/dispatch.py:* in run (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
*/mercurial/dispatch.py:* in _dispatch (glob)
*/mercurial/dispatch.py:* in runcommand (glob)
*/mercurial/dispatch.py:* in _runcommand (glob)
*/mercurial/dispatch.py:* in checkargs (glob)
*/mercurial/dispatch.py:* in <lambda> (glob)
*/mercurial/util.py:* in check (glob)
$TESTTMP/buggylocking.py:* in buggylocking (glob)
"lock" taken before "wlock"
at:
*/hg:* in * (glob)
*/mercurial/dispatch.py:* in run (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
*/mercurial/dispatch.py:* in _dispatch (glob)
*/mercurial/dispatch.py:* in runcommand (glob)
*/mercurial/dispatch.py:* in _runcommand (glob)
*/mercurial/dispatch.py:* in checkargs (glob)
*/mercurial/dispatch.py:* in <lambda> (glob)
*/mercurial/util.py:* in check (glob)
$TESTTMP/buggylocking.py:* in buggylocking (glob)
$ hg properlocking
$ cd ..
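For reference, a minimal sketch (assuming a third-party extension command that uses the same public wlock/lock/transaction APIs as the test extension above) of the ordering the devel lock checks expect, with try/finally so the locks are released even on error:

from mercurial import lock as lockmod

def dosomething(repo):
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()          # working directory lock first
        lock = repo.lock()            # then the store lock
        tr = repo.transaction('dosomething')
        # ... modify the repository here ...
        tr.close()
    finally:
        if tr:
            tr.release()
        lockmod.release(lock, wlock)  # release() skips None locks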