commit: avoid match.files() in conditions...
Martin von Zweigbergk
r25274:14408524 default
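The summary above describes replacing truth-tests on match.files() with more explicit matcher queries. The changed hunks fall outside the excerpt below, so the following is only an illustrative sketch of the pattern being avoided, assuming a matcher exposing the standard files(), always(), and anypats() predicates; the fakematcher stand-in is hypothetical, not Mercurial code:

# Illustrative sketch only (not this commit's actual hunk): fakematcher
# is a hypothetical stand-in for mercurial match objects, which expose
# files(), always(), and anypats() among other predicates.
class fakematcher(object):
    def __init__(self, files, always=False, anypats=False):
        self._files = files
        self._always = always
        self._anypats = anypats
    def files(self):
        return self._files
    def always(self):
        return self._always
    def anypats(self):
        return self._anypats

def matcheseverything_fragile(m):
    # Truth-testing files() conflates "no patterns, match everything"
    # with "patterns that list no literal files".
    return not m.files()

def matcheseverything_explicit(m):
    # Asking the matcher directly keeps the intent unambiguous.
    return m.always()

matchall = fakematcher([], always=True)
globmatch = fakematcher([], anypats=True)
assert matcheseverything_fragile(matchall)        # True, as intended
assert matcheseverything_fragile(globmatch)       # True, but wrong
assert matcheseverything_explicit(matchall)       # True, as intended
assert not matcheseverything_explicit(globmatch)  # correctly False

For a matcher that carries patterns but lists no literal files, the two predicates disagree; that ambiguity is what conditions on match.files() invite.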
@@ -1,1938 +1,1938 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect, random
19 import weakref, errno, os, time, inspect, random
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 'manifestv2'))
196 'manifestv2'))
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 'dotencode'))
198 'dotencode'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return ['revlogv1']
207 return ['revlogv1']
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.requirements = set()
210 self.requirements = set()
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wopener = self.wvfs
212 self.wopener = self.wvfs
213 self.root = self.wvfs.base
213 self.root = self.wvfs.base
214 self.path = self.wvfs.join(".hg")
214 self.path = self.wvfs.join(".hg")
215 self.origroot = path
215 self.origroot = path
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.vfs = scmutil.vfs(self.path)
217 self.vfs = scmutil.vfs(self.path)
218 self.opener = self.vfs
218 self.opener = self.vfs
219 self.baseui = baseui
219 self.baseui = baseui
220 self.ui = baseui.copy()
220 self.ui = baseui.copy()
221 self.ui.copy = baseui.copy # prevent copying repo configuration
221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 # A list of callback to shape the phase if no data were found.
222 # A list of callback to shape the phase if no data were found.
223 # Callback are in the form: func(repo, roots) --> processed root.
223 # Callback are in the form: func(repo, roots) --> processed root.
224 # This list it to be filled by extension during repo setup
224 # This list it to be filled by extension during repo setup
225 self._phasedefaults = []
225 self._phasedefaults = []
226 try:
226 try:
227 self.ui.readconfig(self.join("hgrc"), self.root)
227 self.ui.readconfig(self.join("hgrc"), self.root)
228 extensions.loadall(self.ui)
228 extensions.loadall(self.ui)
229 except IOError:
229 except IOError:
230 pass
230 pass
231
231
232 if self.featuresetupfuncs:
232 if self.featuresetupfuncs:
233 self.supported = set(self._basesupported) # use private copy
233 self.supported = set(self._basesupported) # use private copy
234 extmods = set(m.__name__ for n, m
234 extmods = set(m.__name__ for n, m
235 in extensions.extensions(self.ui))
235 in extensions.extensions(self.ui))
236 for setupfunc in self.featuresetupfuncs:
236 for setupfunc in self.featuresetupfuncs:
237 if setupfunc.__module__ in extmods:
237 if setupfunc.__module__ in extmods:
238 setupfunc(self.ui, self.supported)
238 setupfunc(self.ui, self.supported)
239 else:
239 else:
240 self.supported = self._basesupported
240 self.supported = self._basesupported
241
241
242 if not self.vfs.isdir():
242 if not self.vfs.isdir():
243 if create:
243 if create:
244 if not self.wvfs.exists():
244 if not self.wvfs.exists():
245 self.wvfs.makedirs()
245 self.wvfs.makedirs()
246 self.vfs.makedir(notindexed=True)
246 self.vfs.makedir(notindexed=True)
247 self.requirements.update(self._baserequirements(create))
247 self.requirements.update(self._baserequirements(create))
248 if self.ui.configbool('format', 'usestore', True):
248 if self.ui.configbool('format', 'usestore', True):
249 self.vfs.mkdir("store")
249 self.vfs.mkdir("store")
250 self.requirements.add("store")
250 self.requirements.add("store")
251 if self.ui.configbool('format', 'usefncache', True):
251 if self.ui.configbool('format', 'usefncache', True):
252 self.requirements.add("fncache")
252 self.requirements.add("fncache")
253 if self.ui.configbool('format', 'dotencode', True):
253 if self.ui.configbool('format', 'dotencode', True):
254 self.requirements.add('dotencode')
254 self.requirements.add('dotencode')
255 # create an invalid changelog
255 # create an invalid changelog
256 self.vfs.append(
256 self.vfs.append(
257 "00changelog.i",
257 "00changelog.i",
258 '\0\0\0\2' # represents revlogv2
258 '\0\0\0\2' # represents revlogv2
259 ' dummy changelog to prevent using the old repo layout'
259 ' dummy changelog to prevent using the old repo layout'
260 )
260 )
261 if self.ui.configbool('format', 'generaldelta', False):
261 if self.ui.configbool('format', 'generaldelta', False):
262 self.requirements.add("generaldelta")
262 self.requirements.add("generaldelta")
263 if self.ui.configbool('experimental', 'treemanifest', False):
263 if self.ui.configbool('experimental', 'treemanifest', False):
264 self.requirements.add("treemanifest")
264 self.requirements.add("treemanifest")
265 if self.ui.configbool('experimental', 'manifestv2', False):
265 if self.ui.configbool('experimental', 'manifestv2', False):
266 self.requirements.add("manifestv2")
266 self.requirements.add("manifestv2")
267 else:
267 else:
268 raise error.RepoError(_("repository %s not found") % path)
268 raise error.RepoError(_("repository %s not found") % path)
269 elif create:
269 elif create:
270 raise error.RepoError(_("repository %s already exists") % path)
270 raise error.RepoError(_("repository %s already exists") % path)
271 else:
271 else:
272 try:
272 try:
273 self.requirements = scmutil.readrequires(
273 self.requirements = scmutil.readrequires(
274 self.vfs, self.supported)
274 self.vfs, self.supported)
275 except IOError, inst:
275 except IOError, inst:
276 if inst.errno != errno.ENOENT:
276 if inst.errno != errno.ENOENT:
277 raise
277 raise
278
278
279 self.sharedpath = self.path
279 self.sharedpath = self.path
280 try:
280 try:
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 realpath=True)
282 realpath=True)
283 s = vfs.base
283 s = vfs.base
284 if not vfs.exists():
284 if not vfs.exists():
285 raise error.RepoError(
285 raise error.RepoError(
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 self.sharedpath = s
287 self.sharedpath = s
288 except IOError, inst:
288 except IOError, inst:
289 if inst.errno != errno.ENOENT:
289 if inst.errno != errno.ENOENT:
290 raise
290 raise
291
291
292 self.store = store.store(
292 self.store = store.store(
293 self.requirements, self.sharedpath, scmutil.vfs)
293 self.requirements, self.sharedpath, scmutil.vfs)
294 self.spath = self.store.path
294 self.spath = self.store.path
295 self.svfs = self.store.vfs
295 self.svfs = self.store.vfs
296 self.sopener = self.svfs
296 self.sopener = self.svfs
297 self.sjoin = self.store.join
297 self.sjoin = self.store.join
298 self.vfs.createmode = self.store.createmode
298 self.vfs.createmode = self.store.createmode
299 self._applyopenerreqs()
299 self._applyopenerreqs()
300 if create:
300 if create:
301 self._writerequirements()
301 self._writerequirements()
302
302
303
303
304 self._branchcaches = {}
304 self._branchcaches = {}
305 self._revbranchcache = None
305 self._revbranchcache = None
306 self.filterpats = {}
306 self.filterpats = {}
307 self._datafilters = {}
307 self._datafilters = {}
308 self._transref = self._lockref = self._wlockref = None
308 self._transref = self._lockref = self._wlockref = None
309
309
310 # A cache for various files under .hg/ that tracks file changes,
310 # A cache for various files under .hg/ that tracks file changes,
311 # (used by the filecache decorator)
311 # (used by the filecache decorator)
312 #
312 #
313 # Maps a property name to its util.filecacheentry
313 # Maps a property name to its util.filecacheentry
314 self._filecache = {}
314 self._filecache = {}
315
315
316 # hold sets of revision to be filtered
316 # hold sets of revision to be filtered
317 # should be cleared when something might have changed the filter value:
317 # should be cleared when something might have changed the filter value:
318 # - new changesets,
318 # - new changesets,
319 # - phase change,
319 # - phase change,
320 # - new obsolescence marker,
320 # - new obsolescence marker,
321 # - working directory parent change,
321 # - working directory parent change,
322 # - bookmark changes
322 # - bookmark changes
323 self.filteredrevcache = {}
323 self.filteredrevcache = {}
324
324
325 # generic mapping between names and nodes
325 # generic mapping between names and nodes
326 self.names = namespaces.namespaces()
326 self.names = namespaces.namespaces()
327
327
328 def close(self):
328 def close(self):
329 self._writecaches()
329 self._writecaches()
330
330
331 def _writecaches(self):
331 def _writecaches(self):
332 if self._revbranchcache:
332 if self._revbranchcache:
333 self._revbranchcache.write()
333 self._revbranchcache.write()
334
334
335 def _restrictcapabilities(self, caps):
335 def _restrictcapabilities(self, caps):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 caps = set(caps)
337 caps = set(caps)
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 caps.add('bundle2=' + urllib.quote(capsblob))
339 caps.add('bundle2=' + urllib.quote(capsblob))
340 return caps
340 return caps
341
341
342 def _applyopenerreqs(self):
342 def _applyopenerreqs(self):
343 self.svfs.options = dict((r, 1) for r in self.requirements
343 self.svfs.options = dict((r, 1) for r in self.requirements
344 if r in self.openerreqs)
344 if r in self.openerreqs)
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 if chunkcachesize is not None:
346 if chunkcachesize is not None:
347 self.svfs.options['chunkcachesize'] = chunkcachesize
347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 if maxchainlen is not None:
349 if maxchainlen is not None:
350 self.svfs.options['maxchainlen'] = maxchainlen
350 self.svfs.options['maxchainlen'] = maxchainlen
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 if manifestcachesize is not None:
352 if manifestcachesize is not None:
353 self.svfs.options['manifestcachesize'] = manifestcachesize
353 self.svfs.options['manifestcachesize'] = manifestcachesize
354
354
355 def _writerequirements(self):
355 def _writerequirements(self):
356 scmutil.writerequires(self.vfs, self.requirements)
356 scmutil.writerequires(self.vfs, self.requirements)
357
357
358 def _checknested(self, path):
358 def _checknested(self, path):
359 """Determine if path is a legal nested repository."""
359 """Determine if path is a legal nested repository."""
360 if not path.startswith(self.root):
360 if not path.startswith(self.root):
361 return False
361 return False
362 subpath = path[len(self.root) + 1:]
362 subpath = path[len(self.root) + 1:]
363 normsubpath = util.pconvert(subpath)
363 normsubpath = util.pconvert(subpath)
364
364
365 # XXX: Checking against the current working copy is wrong in
365 # XXX: Checking against the current working copy is wrong in
366 # the sense that it can reject things like
366 # the sense that it can reject things like
367 #
367 #
368 # $ hg cat -r 10 sub/x.txt
368 # $ hg cat -r 10 sub/x.txt
369 #
369 #
370 # if sub/ is no longer a subrepository in the working copy
370 # if sub/ is no longer a subrepository in the working copy
371 # parent revision.
371 # parent revision.
372 #
372 #
373 # However, it can of course also allow things that would have
373 # However, it can of course also allow things that would have
374 # been rejected before, such as the above cat command if sub/
374 # been rejected before, such as the above cat command if sub/
375 # is a subrepository now, but was a normal directory before.
375 # is a subrepository now, but was a normal directory before.
376 # The old path auditor would have rejected by mistake since it
376 # The old path auditor would have rejected by mistake since it
377 # panics when it sees sub/.hg/.
377 # panics when it sees sub/.hg/.
378 #
378 #
379 # All in all, checking against the working copy seems sensible
379 # All in all, checking against the working copy seems sensible
380 # since we want to prevent access to nested repositories on
380 # since we want to prevent access to nested repositories on
381 # the filesystem *now*.
381 # the filesystem *now*.
382 ctx = self[None]
382 ctx = self[None]
383 parts = util.splitpath(subpath)
383 parts = util.splitpath(subpath)
384 while parts:
384 while parts:
385 prefix = '/'.join(parts)
385 prefix = '/'.join(parts)
386 if prefix in ctx.substate:
386 if prefix in ctx.substate:
387 if prefix == normsubpath:
387 if prefix == normsubpath:
388 return True
388 return True
389 else:
389 else:
390 sub = ctx.sub(prefix)
390 sub = ctx.sub(prefix)
391 return sub.checknested(subpath[len(prefix) + 1:])
391 return sub.checknested(subpath[len(prefix) + 1:])
392 else:
392 else:
393 parts.pop()
393 parts.pop()
394 return False
394 return False
395
395
396 def peer(self):
396 def peer(self):
397 return localpeer(self) # not cached to avoid reference cycle
397 return localpeer(self) # not cached to avoid reference cycle
398
398
399 def unfiltered(self):
399 def unfiltered(self):
400 """Return unfiltered version of the repository
400 """Return unfiltered version of the repository
401
401
402 Intended to be overwritten by filtered repo."""
402 Intended to be overwritten by filtered repo."""
403 return self
403 return self
404
404
405 def filtered(self, name):
405 def filtered(self, name):
406 """Return a filtered version of a repository"""
406 """Return a filtered version of a repository"""
407 # build a new class with the mixin and the current class
407 # build a new class with the mixin and the current class
408 # (possibly subclass of the repo)
408 # (possibly subclass of the repo)
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 pass
410 pass
411 return proxycls(self, name)
411 return proxycls(self, name)
412
412
413 @repofilecache('bookmarks')
413 @repofilecache('bookmarks')
414 def _bookmarks(self):
414 def _bookmarks(self):
415 return bookmarks.bmstore(self)
415 return bookmarks.bmstore(self)
416
416
417 @repofilecache('bookmarks.current')
417 @repofilecache('bookmarks.current')
418 def _activebookmark(self):
418 def _activebookmark(self):
419 return bookmarks.readactive(self)
419 return bookmarks.readactive(self)
420
420
421 def bookmarkheads(self, bookmark):
421 def bookmarkheads(self, bookmark):
422 name = bookmark.split('@', 1)[0]
422 name = bookmark.split('@', 1)[0]
423 heads = []
423 heads = []
424 for mark, n in self._bookmarks.iteritems():
424 for mark, n in self._bookmarks.iteritems():
425 if mark.split('@', 1)[0] == name:
425 if mark.split('@', 1)[0] == name:
426 heads.append(n)
426 heads.append(n)
427 return heads
427 return heads
428
428
429 @storecache('phaseroots')
429 @storecache('phaseroots')
430 def _phasecache(self):
430 def _phasecache(self):
431 return phases.phasecache(self, self._phasedefaults)
431 return phases.phasecache(self, self._phasedefaults)
432
432
433 @storecache('obsstore')
433 @storecache('obsstore')
434 def obsstore(self):
434 def obsstore(self):
435 # read default format for new obsstore.
435 # read default format for new obsstore.
436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
437 # rely on obsstore class default when possible.
437 # rely on obsstore class default when possible.
438 kwargs = {}
438 kwargs = {}
439 if defaultformat is not None:
439 if defaultformat is not None:
440 kwargs['defaultformat'] = defaultformat
440 kwargs['defaultformat'] = defaultformat
441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
442 store = obsolete.obsstore(self.svfs, readonly=readonly,
442 store = obsolete.obsstore(self.svfs, readonly=readonly,
443 **kwargs)
443 **kwargs)
444 if store and readonly:
444 if store and readonly:
445 self.ui.warn(
445 self.ui.warn(
446 _('obsolete feature not enabled but %i markers found!\n')
446 _('obsolete feature not enabled but %i markers found!\n')
447 % len(list(store)))
447 % len(list(store)))
448 return store
448 return store
449
449
450 @storecache('00changelog.i')
450 @storecache('00changelog.i')
451 def changelog(self):
451 def changelog(self):
452 c = changelog.changelog(self.svfs)
452 c = changelog.changelog(self.svfs)
453 if 'HG_PENDING' in os.environ:
453 if 'HG_PENDING' in os.environ:
454 p = os.environ['HG_PENDING']
454 p = os.environ['HG_PENDING']
455 if p.startswith(self.root):
455 if p.startswith(self.root):
456 c.readpending('00changelog.i.a')
456 c.readpending('00changelog.i.a')
457 return c
457 return c
458
458
459 @storecache('00manifest.i')
459 @storecache('00manifest.i')
460 def manifest(self):
460 def manifest(self):
461 return manifest.manifest(self.svfs)
461 return manifest.manifest(self.svfs)
462
462
463 def dirlog(self, dir):
463 def dirlog(self, dir):
464 return self.manifest.dirlog(dir)
464 return self.manifest.dirlog(dir)
465
465
466 @repofilecache('dirstate')
466 @repofilecache('dirstate')
467 def dirstate(self):
467 def dirstate(self):
468 warned = [0]
468 warned = [0]
469 def validate(node):
469 def validate(node):
470 try:
470 try:
471 self.changelog.rev(node)
471 self.changelog.rev(node)
472 return node
472 return node
473 except error.LookupError:
473 except error.LookupError:
474 if not warned[0]:
474 if not warned[0]:
475 warned[0] = True
475 warned[0] = True
476 self.ui.warn(_("warning: ignoring unknown"
476 self.ui.warn(_("warning: ignoring unknown"
477 " working parent %s!\n") % short(node))
477 " working parent %s!\n") % short(node))
478 return nullid
478 return nullid
479
479
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481
481
482 def __getitem__(self, changeid):
482 def __getitem__(self, changeid):
483 if changeid is None:
483 if changeid is None:
484 return context.workingctx(self)
484 return context.workingctx(self)
485 if isinstance(changeid, slice):
485 if isinstance(changeid, slice):
486 return [context.changectx(self, i)
486 return [context.changectx(self, i)
487 for i in xrange(*changeid.indices(len(self)))
487 for i in xrange(*changeid.indices(len(self)))
488 if i not in self.changelog.filteredrevs]
488 if i not in self.changelog.filteredrevs]
489 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
490
490
491 def __contains__(self, changeid):
491 def __contains__(self, changeid):
492 try:
492 try:
493 self[changeid]
493 self[changeid]
494 return True
494 return True
495 except error.RepoLookupError:
495 except error.RepoLookupError:
496 return False
496 return False
497
497
498 def __nonzero__(self):
498 def __nonzero__(self):
499 return True
499 return True
500
500
501 def __len__(self):
501 def __len__(self):
502 return len(self.changelog)
502 return len(self.changelog)
503
503
504 def __iter__(self):
504 def __iter__(self):
505 return iter(self.changelog)
505 return iter(self.changelog)
506
506
507 def revs(self, expr, *args):
507 def revs(self, expr, *args):
508 '''Return a list of revisions matching the given revset'''
508 '''Return a list of revisions matching the given revset'''
509 expr = revset.formatspec(expr, *args)
509 expr = revset.formatspec(expr, *args)
510 m = revset.match(None, expr)
510 m = revset.match(None, expr)
511 return m(self)
511 return m(self)
512
512
513 def set(self, expr, *args):
513 def set(self, expr, *args):
514 '''
514 '''
515 Yield a context for each matching revision, after doing arg
515 Yield a context for each matching revision, after doing arg
516 replacement via revset.formatspec
516 replacement via revset.formatspec
517 '''
517 '''
518 for r in self.revs(expr, *args):
518 for r in self.revs(expr, *args):
519 yield self[r]
519 yield self[r]
520
520
521 def url(self):
521 def url(self):
522 return 'file:' + self.root
522 return 'file:' + self.root
523
523
524 def hook(self, name, throw=False, **args):
524 def hook(self, name, throw=False, **args):
525 """Call a hook, passing this repo instance.
525 """Call a hook, passing this repo instance.
526
526
527 This a convenience method to aid invoking hooks. Extensions likely
527 This a convenience method to aid invoking hooks. Extensions likely
528 won't call this unless they have registered a custom hook or are
528 won't call this unless they have registered a custom hook or are
529 replacing code that is expected to call a hook.
529 replacing code that is expected to call a hook.
530 """
530 """
531 return hook.hook(self.ui, self, name, throw, **args)
531 return hook.hook(self.ui, self, name, throw, **args)
532
532
533 @unfilteredmethod
533 @unfilteredmethod
534 def _tag(self, names, node, message, local, user, date, extra={},
534 def _tag(self, names, node, message, local, user, date, extra={},
535 editor=False):
535 editor=False):
536 if isinstance(names, str):
536 if isinstance(names, str):
537 names = (names,)
537 names = (names,)
538
538
539 branches = self.branchmap()
539 branches = self.branchmap()
540 for name in names:
540 for name in names:
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 local=local)
542 local=local)
543 if name in branches:
543 if name in branches:
544 self.ui.warn(_("warning: tag %s conflicts with existing"
544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 " branch name\n") % name)
545 " branch name\n") % name)
546
546
547 def writetags(fp, names, munge, prevtags):
547 def writetags(fp, names, munge, prevtags):
548 fp.seek(0, 2)
548 fp.seek(0, 2)
549 if prevtags and prevtags[-1] != '\n':
549 if prevtags and prevtags[-1] != '\n':
550 fp.write('\n')
550 fp.write('\n')
551 for name in names:
551 for name in names:
552 if munge:
552 if munge:
553 m = munge(name)
553 m = munge(name)
554 else:
554 else:
555 m = name
555 m = name
556
556
557 if (self._tagscache.tagtypes and
557 if (self._tagscache.tagtypes and
558 name in self._tagscache.tagtypes):
558 name in self._tagscache.tagtypes):
559 old = self.tags().get(name, nullid)
559 old = self.tags().get(name, nullid)
560 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(node), m))
561 fp.write('%s %s\n' % (hex(node), m))
562 fp.close()
562 fp.close()
563
563
564 prevtags = ''
564 prevtags = ''
565 if local:
565 if local:
566 try:
566 try:
567 fp = self.vfs('localtags', 'r+')
567 fp = self.vfs('localtags', 'r+')
568 except IOError:
568 except IOError:
569 fp = self.vfs('localtags', 'a')
569 fp = self.vfs('localtags', 'a')
570 else:
570 else:
571 prevtags = fp.read()
571 prevtags = fp.read()
572
572
573 # local tags are stored in the current charset
573 # local tags are stored in the current charset
574 writetags(fp, names, None, prevtags)
574 writetags(fp, names, None, prevtags)
575 for name in names:
575 for name in names:
576 self.hook('tag', node=hex(node), tag=name, local=local)
576 self.hook('tag', node=hex(node), tag=name, local=local)
577 return
577 return
578
578
579 try:
579 try:
580 fp = self.wfile('.hgtags', 'rb+')
580 fp = self.wfile('.hgtags', 'rb+')
581 except IOError, e:
581 except IOError, e:
582 if e.errno != errno.ENOENT:
582 if e.errno != errno.ENOENT:
583 raise
583 raise
584 fp = self.wfile('.hgtags', 'ab')
584 fp = self.wfile('.hgtags', 'ab')
585 else:
585 else:
586 prevtags = fp.read()
586 prevtags = fp.read()
587
587
588 # committed tags are stored in UTF-8
588 # committed tags are stored in UTF-8
589 writetags(fp, names, encoding.fromlocal, prevtags)
589 writetags(fp, names, encoding.fromlocal, prevtags)
590
590
591 fp.close()
591 fp.close()
592
592
593 self.invalidatecaches()
593 self.invalidatecaches()
594
594
595 if '.hgtags' not in self.dirstate:
595 if '.hgtags' not in self.dirstate:
596 self[None].add(['.hgtags'])
596 self[None].add(['.hgtags'])
597
597
598 m = matchmod.exact(self.root, '', ['.hgtags'])
598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 editor=editor)
600 editor=editor)
601
601
602 for name in names:
602 for name in names:
603 self.hook('tag', node=hex(node), tag=name, local=local)
603 self.hook('tag', node=hex(node), tag=name, local=local)
604
604
605 return tagnode
605 return tagnode
606
606
607 def tag(self, names, node, message, local, user, date, editor=False):
607 def tag(self, names, node, message, local, user, date, editor=False):
608 '''tag a revision with one or more symbolic names.
608 '''tag a revision with one or more symbolic names.
609
609
610 names is a list of strings or, when adding a single tag, names may be a
610 names is a list of strings or, when adding a single tag, names may be a
611 string.
611 string.
612
612
613 if local is True, the tags are stored in a per-repository file.
613 if local is True, the tags are stored in a per-repository file.
614 otherwise, they are stored in the .hgtags file, and a new
614 otherwise, they are stored in the .hgtags file, and a new
615 changeset is committed with the change.
615 changeset is committed with the change.
616
616
617 keyword arguments:
617 keyword arguments:
618
618
619 local: whether to store tags in non-version-controlled file
619 local: whether to store tags in non-version-controlled file
620 (default False)
620 (default False)
621
621
622 message: commit message to use if committing
622 message: commit message to use if committing
623
623
624 user: name of user to use if committing
624 user: name of user to use if committing
625
625
626 date: date tuple to use if committing'''
626 date: date tuple to use if committing'''
627
627
628 if not local:
628 if not local:
629 m = matchmod.exact(self.root, '', ['.hgtags'])
629 m = matchmod.exact(self.root, '', ['.hgtags'])
630 if any(self.status(match=m, unknown=True, ignored=True)):
630 if any(self.status(match=m, unknown=True, ignored=True)):
631 raise util.Abort(_('working copy of .hgtags is changed'),
631 raise util.Abort(_('working copy of .hgtags is changed'),
632 hint=_('please commit .hgtags manually'))
632 hint=_('please commit .hgtags manually'))
633
633
634 self.tags() # instantiate the cache
634 self.tags() # instantiate the cache
635 self._tag(names, node, message, local, user, date, editor=editor)
635 self._tag(names, node, message, local, user, date, editor=editor)
636
636
637 @filteredpropertycache
637 @filteredpropertycache
638 def _tagscache(self):
638 def _tagscache(self):
639 '''Returns a tagscache object that contains various tags related
639 '''Returns a tagscache object that contains various tags related
640 caches.'''
640 caches.'''
641
641
642 # This simplifies its cache management by having one decorated
642 # This simplifies its cache management by having one decorated
643 # function (this one) and the rest simply fetch things from it.
643 # function (this one) and the rest simply fetch things from it.
644 class tagscache(object):
644 class tagscache(object):
645 def __init__(self):
645 def __init__(self):
646 # These two define the set of tags for this repository. tags
646 # These two define the set of tags for this repository. tags
647 # maps tag name to node; tagtypes maps tag name to 'global' or
647 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # 'local'. (Global tags are defined by .hgtags across all
648 # 'local'. (Global tags are defined by .hgtags across all
649 # heads, and local tags are defined in .hg/localtags.)
649 # heads, and local tags are defined in .hg/localtags.)
650 # They constitute the in-memory cache of tags.
650 # They constitute the in-memory cache of tags.
651 self.tags = self.tagtypes = None
651 self.tags = self.tagtypes = None
652
652
653 self.nodetagscache = self.tagslist = None
653 self.nodetagscache = self.tagslist = None
654
654
655 cache = tagscache()
655 cache = tagscache()
656 cache.tags, cache.tagtypes = self._findtags()
656 cache.tags, cache.tagtypes = self._findtags()
657
657
658 return cache
658 return cache
659
659
660 def tags(self):
660 def tags(self):
661 '''return a mapping of tag to node'''
661 '''return a mapping of tag to node'''
662 t = {}
662 t = {}
663 if self.changelog.filteredrevs:
663 if self.changelog.filteredrevs:
664 tags, tt = self._findtags()
664 tags, tt = self._findtags()
665 else:
665 else:
666 tags = self._tagscache.tags
666 tags = self._tagscache.tags
667 for k, v in tags.iteritems():
667 for k, v in tags.iteritems():
668 try:
668 try:
669 # ignore tags to unknown nodes
669 # ignore tags to unknown nodes
670 self.changelog.rev(v)
670 self.changelog.rev(v)
671 t[k] = v
671 t[k] = v
672 except (error.LookupError, ValueError):
672 except (error.LookupError, ValueError):
673 pass
673 pass
674 return t
674 return t
675
675
676 def _findtags(self):
676 def _findtags(self):
677 '''Do the hard work of finding tags. Return a pair of dicts
677 '''Do the hard work of finding tags. Return a pair of dicts
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 maps tag name to a string like \'global\' or \'local\'.
679 maps tag name to a string like \'global\' or \'local\'.
680 Subclasses or extensions are free to add their own tags, but
680 Subclasses or extensions are free to add their own tags, but
681 should be aware that the returned dicts will be retained for the
681 should be aware that the returned dicts will be retained for the
682 duration of the localrepo object.'''
682 duration of the localrepo object.'''
683
683
684 # XXX what tagtype should subclasses/extensions use? Currently
684 # XXX what tagtype should subclasses/extensions use? Currently
685 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # Should each extension invent its own tag type? Should there
686 # Should each extension invent its own tag type? Should there
687 # be one tagtype for all such "virtual" tags? Or is the status
687 # be one tagtype for all such "virtual" tags? Or is the status
688 # quo fine?
688 # quo fine?
689
689
690 alltags = {} # map tag name to (node, hist)
690 alltags = {} # map tag name to (node, hist)
691 tagtypes = {}
691 tagtypes = {}
692
692
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695
695
696 # Build the return dicts. Have to re-encode tag names because
696 # Build the return dicts. Have to re-encode tag names because
697 # the tags module always uses UTF-8 (in order not to lose info
697 # the tags module always uses UTF-8 (in order not to lose info
698 # writing to the cache), but the rest of Mercurial wants them in
698 # writing to the cache), but the rest of Mercurial wants them in
699 # local encoding.
699 # local encoding.
700 tags = {}
700 tags = {}
701 for (name, (node, hist)) in alltags.iteritems():
701 for (name, (node, hist)) in alltags.iteritems():
702 if node != nullid:
702 if node != nullid:
703 tags[encoding.tolocal(name)] = node
703 tags[encoding.tolocal(name)] = node
704 tags['tip'] = self.changelog.tip()
704 tags['tip'] = self.changelog.tip()
705 tagtypes = dict([(encoding.tolocal(name), value)
705 tagtypes = dict([(encoding.tolocal(name), value)
706 for (name, value) in tagtypes.iteritems()])
706 for (name, value) in tagtypes.iteritems()])
707 return (tags, tagtypes)
707 return (tags, tagtypes)
708
708
709 def tagtype(self, tagname):
709 def tagtype(self, tagname):
710 '''
710 '''
711 return the type of the given tag. result can be:
711 return the type of the given tag. result can be:
712
712
713 'local' : a local tag
713 'local' : a local tag
714 'global' : a global tag
714 'global' : a global tag
715 None : tag does not exist
715 None : tag does not exist
716 '''
716 '''
717
717
718 return self._tagscache.tagtypes.get(tagname)
718 return self._tagscache.tagtypes.get(tagname)
719
719
720 def tagslist(self):
720 def tagslist(self):
721 '''return a list of tags ordered by revision'''
721 '''return a list of tags ordered by revision'''
722 if not self._tagscache.tagslist:
722 if not self._tagscache.tagslist:
723 l = []
723 l = []
724 for t, n in self.tags().iteritems():
724 for t, n in self.tags().iteritems():
725 l.append((self.changelog.rev(n), t, n))
725 l.append((self.changelog.rev(n), t, n))
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727
727
728 return self._tagscache.tagslist
728 return self._tagscache.tagslist
729
729
730 def nodetags(self, node):
730 def nodetags(self, node):
731 '''return the tags associated with a node'''
731 '''return the tags associated with a node'''
732 if not self._tagscache.nodetagscache:
732 if not self._tagscache.nodetagscache:
733 nodetagscache = {}
733 nodetagscache = {}
734 for t, n in self._tagscache.tags.iteritems():
734 for t, n in self._tagscache.tags.iteritems():
735 nodetagscache.setdefault(n, []).append(t)
735 nodetagscache.setdefault(n, []).append(t)
736 for tags in nodetagscache.itervalues():
736 for tags in nodetagscache.itervalues():
737 tags.sort()
737 tags.sort()
738 self._tagscache.nodetagscache = nodetagscache
738 self._tagscache.nodetagscache = nodetagscache
739 return self._tagscache.nodetagscache.get(node, [])
739 return self._tagscache.nodetagscache.get(node, [])
740
740
741 def nodebookmarks(self, node):
741 def nodebookmarks(self, node):
742 marks = []
742 marks = []
743 for bookmark, n in self._bookmarks.iteritems():
743 for bookmark, n in self._bookmarks.iteritems():
744 if n == node:
744 if n == node:
745 marks.append(bookmark)
745 marks.append(bookmark)
746 return sorted(marks)
746 return sorted(marks)
747
747
748 def branchmap(self):
748 def branchmap(self):
749 '''returns a dictionary {branch: [branchheads]} with branchheads
749 '''returns a dictionary {branch: [branchheads]} with branchheads
750 ordered by increasing revision number'''
750 ordered by increasing revision number'''
751 branchmap.updatecache(self)
751 branchmap.updatecache(self)
752 return self._branchcaches[self.filtername]
752 return self._branchcaches[self.filtername]
753
753
754 @unfilteredmethod
754 @unfilteredmethod
755 def revbranchcache(self):
755 def revbranchcache(self):
756 if not self._revbranchcache:
756 if not self._revbranchcache:
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 return self._revbranchcache
758 return self._revbranchcache
759
759
760 def branchtip(self, branch, ignoremissing=False):
760 def branchtip(self, branch, ignoremissing=False):
761 '''return the tip node for a given branch
761 '''return the tip node for a given branch
762
762
763 If ignoremissing is True, then this method will not raise an error.
763 If ignoremissing is True, then this method will not raise an error.
764 This is helpful for callers that only expect None for a missing branch
764 This is helpful for callers that only expect None for a missing branch
765 (e.g. namespace).
765 (e.g. namespace).
766
766
767 '''
767 '''
768 try:
768 try:
769 return self.branchmap().branchtip(branch)
769 return self.branchmap().branchtip(branch)
770 except KeyError:
770 except KeyError:
771 if not ignoremissing:
771 if not ignoremissing:
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 else:
773 else:
774 pass
774 pass
775
775
776 def lookup(self, key):
776 def lookup(self, key):
777 return self[key].node()
777 return self[key].node()
778
778
779 def lookupbranch(self, key, remote=None):
779 def lookupbranch(self, key, remote=None):
780 repo = remote or self
780 repo = remote or self
781 if key in repo.branchmap():
781 if key in repo.branchmap():
782 return key
782 return key
783
783
784 repo = (remote and remote.local()) and remote or self
784 repo = (remote and remote.local()) and remote or self
785 return repo[key].branch()
785 return repo[key].branch()
786
786
787 def known(self, nodes):
787 def known(self, nodes):
788 nm = self.changelog.nodemap
788 nm = self.changelog.nodemap
789 pc = self._phasecache
789 pc = self._phasecache
790 result = []
790 result = []
791 for n in nodes:
791 for n in nodes:
792 r = nm.get(n)
792 r = nm.get(n)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 result.append(resp)
794 result.append(resp)
795 return result
795 return result
796
796
797 def local(self):
797 def local(self):
798 return self
798 return self
799
799
800 def cancopy(self):
800 def cancopy(self):
801 # so statichttprepo's override of local() works
801 # so statichttprepo's override of local() works
802 if not self.local():
802 if not self.local():
803 return False
803 return False
804 if not self.ui.configbool('phases', 'publish', True):
804 if not self.ui.configbool('phases', 'publish', True):
805 return True
805 return True
806 # if publishing we can't copy if there is filtered content
806 # if publishing we can't copy if there is filtered content
807 return not self.filtered('visible').changelog.filteredrevs
807 return not self.filtered('visible').changelog.filteredrevs
808
808
809 def shared(self):
809 def shared(self):
810 '''the type of shared repository (None if not shared)'''
810 '''the type of shared repository (None if not shared)'''
811 if self.sharedpath != self.path:
811 if self.sharedpath != self.path:
812 return 'store'
812 return 'store'
813 return None
813 return None
814
814
815 def join(self, f, *insidef):
815 def join(self, f, *insidef):
816 return self.vfs.join(os.path.join(f, *insidef))
816 return self.vfs.join(os.path.join(f, *insidef))
817
817
818 def wjoin(self, f, *insidef):
818 def wjoin(self, f, *insidef):
819 return self.vfs.reljoin(self.root, f, *insidef)
819 return self.vfs.reljoin(self.root, f, *insidef)
820
820
821 def file(self, f):
821 def file(self, f):
822 if f[0] == '/':
822 if f[0] == '/':
823 f = f[1:]
823 f = f[1:]
824 return filelog.filelog(self.svfs, f)
824 return filelog.filelog(self.svfs, f)
825
825
826 def changectx(self, changeid):
826 def changectx(self, changeid):
827 return self[changeid]
827 return self[changeid]
828
828
829 def parents(self, changeid=None):
829 def parents(self, changeid=None):
830 '''get list of changectxs for parents of changeid'''
830 '''get list of changectxs for parents of changeid'''
831 return self[changeid].parents()
831 return self[changeid].parents()
832
832
833 def setparents(self, p1, p2=nullid):
833 def setparents(self, p1, p2=nullid):
834 self.dirstate.beginparentchange()
834 self.dirstate.beginparentchange()
835 copies = self.dirstate.setparents(p1, p2)
835 copies = self.dirstate.setparents(p1, p2)
836 pctx = self[p1]
836 pctx = self[p1]
837 if copies:
837 if copies:
838 # Adjust copy records, the dirstate cannot do it, it
838 # Adjust copy records, the dirstate cannot do it, it
839 # requires access to parents manifests. Preserve them
839 # requires access to parents manifests. Preserve them
840 # only for entries added to first parent.
840 # only for entries added to first parent.
841 for f in copies:
841 for f in copies:
842 if f not in pctx and copies[f] in pctx:
842 if f not in pctx and copies[f] in pctx:
843 self.dirstate.copy(copies[f], f)
843 self.dirstate.copy(copies[f], f)
844 if p2 == nullid:
844 if p2 == nullid:
845 for f, s in sorted(self.dirstate.copies().items()):
845 for f, s in sorted(self.dirstate.copies().items()):
846 if f not in pctx and s not in pctx:
846 if f not in pctx and s not in pctx:
847 self.dirstate.copy(None, f)
847 self.dirstate.copy(None, f)
848 self.dirstate.endparentchange()
848 self.dirstate.endparentchange()
849
849
850 def filectx(self, path, changeid=None, fileid=None):
850 def filectx(self, path, changeid=None, fileid=None):
851 """changeid can be a changeset revision, node, or tag.
851 """changeid can be a changeset revision, node, or tag.
852 fileid can be a file revision or node."""
852 fileid can be a file revision or node."""
853 return context.filectx(self, path, changeid, fileid)
853 return context.filectx(self, path, changeid, fileid)
854
854
855 def getcwd(self):
855 def getcwd(self):
856 return self.dirstate.getcwd()
856 return self.dirstate.getcwd()
857
857
858 def pathto(self, f, cwd=None):
858 def pathto(self, f, cwd=None):
859 return self.dirstate.pathto(f, cwd)
859 return self.dirstate.pathto(f, cwd)
860
860
861 def wfile(self, f, mode='r'):
861 def wfile(self, f, mode='r'):
862 return self.wvfs(f, mode)
862 return self.wvfs(f, mode)
863
863
864 def _link(self, f):
864 def _link(self, f):
865 return self.wvfs.islink(f)
865 return self.wvfs.islink(f)
866
866
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

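    # Example (illustrative; ``repo`` and the ``mynormalize`` helper are
    # assumptions): a data filter registered here can then be referenced by
    # name from [encode]/[decode] configuration entries, since _loadfilter
    # matches the configured command against registered names by prefix:
    #
    #   def mynormalize(s, cmd, **kwargs):
    #       return s.replace('\r\n', '\n')
    #   repo.adddatafilter('mynormalize:', mynormalize)
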
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

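    # Example (illustrative sketch; assumes ``repo`` and a tracked file
    # ``foo.txt``): wread() applies the [encode] filters on the way in,
    # wwrite() applies the [decode] filters on the way out:
    #
    #   data = repo.wread('foo.txt')
    #   repo.wwrite('foo.txt', data, repo[None].flags('foo.txt'))
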
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

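    # Example (illustrative; assumes ``repo`` with the store lock held):
    # transactions nest, so a caller can open one unconditionally and
    # release it when done:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... write data through tr ...
    #       tr.close()      # commit
    #   finally:
    #       tr.release()    # rolls back unless close() was called
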
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restore it to a
        previously known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

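    # Example (illustrative; assumes ``repo``): schedule work for after the
    # outermost lock is dropped; with no lock held, the callback runs
    # immediately:
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)
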
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a deadlock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

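    # Example (illustrative; assumes ``repo``): take both locks in the
    # documented order, 'wlock' before 'lock', and release them in reverse:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)
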
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

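    # Example (illustrative; assumes ``repo`` with modified files foo.py and
    # bar.py): commit two explicit files through a matcher, as the command
    # layer does:
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['foo.py', 'bar.py'])
    #   node = repo.commit(text='fix foo and bar', user='alice', match=m)
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
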
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract
                # anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

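    # Example (illustrative; assumes ``repo``): list every tracked .py file
    # in the working directory:
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write(f + '\n')
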
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

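    # Example (illustrative; assumes ``repo``): print the heads of the
    # 'default' branch, newest first:
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(node))
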
1711 def branches(self, nodes):
1711 def branches(self, nodes):
1712 if not nodes:
1712 if not nodes:
1713 nodes = [self.changelog.tip()]
1713 nodes = [self.changelog.tip()]
1714 b = []
1714 b = []
1715 for n in nodes:
1715 for n in nodes:
1716 t = n
1716 t = n
1717 while True:
1717 while True:
1718 p = self.changelog.parents(n)
1718 p = self.changelog.parents(n)
1719 if p[1] != nullid or p[0] == nullid:
1719 if p[1] != nullid or p[0] == nullid:
1720 b.append((t, n, p[0], p[1]))
1720 b.append((t, n, p[0], p[1]))
1721 break
1721 break
1722 n = p[0]
1722 n = p[0]
1723 return b
1723 return b
1724
1724
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

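    # Editor's sketch (not part of the original file): the sampling pattern
    # used by between() above, on a linear first-parent chain ordered top to
    # bottom. Nodes are collected at distances 1, 2, 4, 8, ... from the top
    # (the bottom node itself is never reported), which keeps discovery
    # replies logarithmic in the chain length. The helper is hypothetical:
    #
    #     def betweensample(chain):
    #         l, f = [], 1
    #         for i, n in enumerate(chain[:-1]):
    #             if i == f:
    #                 l.append(n)
    #                 f *= 2
    #         return l
    #
    #     # betweensample(range(10)) -> [1, 2, 4, 8]
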
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

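    # Editor's sketch (not part of the original file): the usual way an
    # extension hooks checkpush(), by subclassing the live repo class from
    # its reposetup(). The class name and force-push warning are
    # hypothetical:
    #
    #     def reposetup(ui, repo):
    #         if not repo.local():
    #             return
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, pushop):
    #                 super(vetorepo, self).checkpush(pushop)
    #                 if pushop.force:
    #                     self.ui.warn('myext: forced push\n')
    #         repo.__class__ = vetorepo
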
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

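    # Editor's sketch (not part of the original file): registering a
    # pre-push-outgoing function through util.hooks.add(). The callback
    # receives (repo, remote, outgoing) as documented above, where
    # outgoing.missing lists the changesets about to be pushed. The
    # extension name and threshold are hypothetical:
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         if len(outgoing.missing) > 100:
    #             repo.ui.warn('myext: pushing %d changesets\n'
    #                          % len(outgoing.missing))
    #
    #     # typically from an extension's reposetup():
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)
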
    def stream_in(self, remote, remotereqs):
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1

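    # Editor's note (not part of the original file): the status line parsed
    # above uses these codes, exactly as handled by stream_in():
    #     0 - success; raw stream data follows on the same file object
    #     1 - operation forbidden by the server
    #     2 - the server failed to lock the repository
    # any other value is reported as an unknown error code.
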
    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick the cache as low as possible; filters
                    # above 'served' are unlikely to be fetched from a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
        finally:
            lock.release()

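    # Editor's sketch (not part of the original file): the requirements
    # merge above, in isolation. Format requirements are taken from the
    # remote wholesale, while local requirements not listed in
    # supportedformats survive. The example values are illustrative only:
    #
    #     def mergereqs(localreqs, supportedformats, remotereqs):
    #         return remotereqs | (localreqs - supportedformats)
    #
    #     # mergereqs(set(['revlogv1', 'store', 'shared']),
    #     #           set(['revlogv1', 'generaldelta']),
    #     #           set(['revlogv1', 'generaldelta']))
    #     # -> set(['revlogv1', 'generaldelta', 'store', 'shared'])
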
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

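    # Editor's sketch (not part of the original file): the streaming
    # decision made by clone() above, restated as a predicate. Streaming is
    # only attempted when no specific heads were requested and the server
    # either speaks the legacy 'stream' capability (plain revlogv1) or
    # advertises 'streamreqs' we fully support. Names are hypothetical:
    #
    #     def canstream(heads, hasstream, streamreqs, supportedformats):
    #         if heads:
    #             return False
    #         if hasstream:
    #             return True
    #         return bool(streamreqs) and not (streamreqs - supportedformats)
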
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

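    # Editor's sketch (not part of the original file): typical use of the
    # pushkey()/listkeys() pair for the standard 'bookmarks' namespace,
    # where values are hex nodes and an empty old value creates the key.
    # The bookmark name and new node below are hypothetical:
    #
    #     marks = repo.listkeys('bookmarks')
    #     repo.pushkey('bookmarks', 'stable',
    #                  marks.get('stable', ''), newhexnode)
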
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

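    # Editor's note (not part of the original file): the message lands in
    # .hg/last-message.txt, and the returned value is that path made
    # relative for user-facing output, e.g. (message text hypothetical):
    #
    #     repo.savecommitmessage('WIP: draft message\n')
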
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

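# Editor's sketch (not part of the original file): aftertrans() returns a
# plain closure rather than a bound method so the transaction holds no
# reference back to the repository (see the comment above). A hedged example
# of its input, with a hypothetical vfs object:
#
#     post = aftertrans([(vfs, 'journal', 'undo'),
#                        (vfs, 'journal.dirstate', 'undo.dirstate')])
#     post()  # after the transaction closes: journal* renamed to undo*
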
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

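# Editor's note (not part of the original file): undoname() maps a journal
# file to its post-transaction name, mirroring the renames installed by
# aftertrans(), e.g.:
#
#     undoname('.hg/journal.dirstate') -> '.hg/undo.dirstate'
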
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True