localrepo: eliminate local requirements var in init...
Drew Gottlieb
r24918:2eac3ae0 default
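This commit removes the function-local 'requirements' set that __init__ built
up and only copied onto the instance near the end. Instead, self.requirements
is created empty at the top of __init__ and mutated in place, so repo
creation, reading an existing .hg/requires, and the store setup all work
against the same attribute. A condensed sketch of the shape of the change
(simplified from the diff below; unrelated setup elided):

    # Before: a local set, assigned to the instance at the end.
    def __init__(self, baseui, path=None, create=False):
        ...
        requirements = set(self._baserequirements(create))
        requirements.add("store")
        ...
        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.requirements = requirements

    # After: the instance attribute is the single source of truth.
    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        ...
        self.requirements.update(self._baserequirements(create))
        self.requirements.add("store")
        ...
        self.store = store.store(
                self.requirements, self.sharedpath, scmutil.vfs)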
@@ -1,1971 +1,1972
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 'dotencode'))
197 'dotencode'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 filtername = None
199 filtername = None
200
200
201 # a list of (ui, featureset) functions.
201 # a list of (ui, featureset) functions.
202 # only functions defined in module of enabled extensions are invoked
202 # only functions defined in module of enabled extensions are invoked
203 featuresetupfuncs = set()
203 featuresetupfuncs = set()
204
204
205 def _baserequirements(self, create):
205 def _baserequirements(self, create):
206 return ['revlogv1']
206 return ['revlogv1']
207
207
208 def __init__(self, baseui, path=None, create=False):
208 def __init__(self, baseui, path=None, create=False):
209 self.requirements = set()
209 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wopener = self.wvfs
211 self.wopener = self.wvfs
211 self.root = self.wvfs.base
212 self.root = self.wvfs.base
212 self.path = self.wvfs.join(".hg")
213 self.path = self.wvfs.join(".hg")
213 self.origroot = path
214 self.origroot = path
214 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.vfs = scmutil.vfs(self.path)
216 self.vfs = scmutil.vfs(self.path)
216 self.opener = self.vfs
217 self.opener = self.vfs
217 self.baseui = baseui
218 self.baseui = baseui
218 self.ui = baseui.copy()
219 self.ui = baseui.copy()
219 self.ui.copy = baseui.copy # prevent copying repo configuration
220 self.ui.copy = baseui.copy # prevent copying repo configuration
220 # A list of callback to shape the phase if no data were found.
221 # A list of callback to shape the phase if no data were found.
221 # Callback are in the form: func(repo, roots) --> processed root.
222 # Callback are in the form: func(repo, roots) --> processed root.
222 # This list it to be filled by extension during repo setup
223 # This list it to be filled by extension during repo setup
223 self._phasedefaults = []
224 self._phasedefaults = []
224 try:
225 try:
225 self.ui.readconfig(self.join("hgrc"), self.root)
226 self.ui.readconfig(self.join("hgrc"), self.root)
226 extensions.loadall(self.ui)
227 extensions.loadall(self.ui)
227 except IOError:
228 except IOError:
228 pass
229 pass
229
230
230 if self.featuresetupfuncs:
231 if self.featuresetupfuncs:
231 self.supported = set(self._basesupported) # use private copy
232 self.supported = set(self._basesupported) # use private copy
232 extmods = set(m.__name__ for n, m
233 extmods = set(m.__name__ for n, m
233 in extensions.extensions(self.ui))
234 in extensions.extensions(self.ui))
234 for setupfunc in self.featuresetupfuncs:
235 for setupfunc in self.featuresetupfuncs:
235 if setupfunc.__module__ in extmods:
236 if setupfunc.__module__ in extmods:
236 setupfunc(self.ui, self.supported)
237 setupfunc(self.ui, self.supported)
237 else:
238 else:
238 self.supported = self._basesupported
239 self.supported = self._basesupported
239
240
240 if not self.vfs.isdir():
241 if not self.vfs.isdir():
241 if create:
242 if create:
242 if not self.wvfs.exists():
243 if not self.wvfs.exists():
243 self.wvfs.makedirs()
244 self.wvfs.makedirs()
244 self.vfs.makedir(notindexed=True)
245 self.vfs.makedir(notindexed=True)
245 requirements = set(self._baserequirements(create))
246 self.requirements.update(self._baserequirements(create))
246 if self.ui.configbool('format', 'usestore', True):
247 if self.ui.configbool('format', 'usestore', True):
247 self.vfs.mkdir("store")
248 self.vfs.mkdir("store")
248 requirements.add("store")
249 self.requirements.add("store")
249 if self.ui.configbool('format', 'usefncache', True):
250 if self.ui.configbool('format', 'usefncache', True):
250 requirements.add("fncache")
251 self.requirements.add("fncache")
251 if self.ui.configbool('format', 'dotencode', True):
252 if self.ui.configbool('format', 'dotencode', True):
252 requirements.add('dotencode')
253 self.requirements.add('dotencode')
253 # create an invalid changelog
254 # create an invalid changelog
254 self.vfs.append(
255 self.vfs.append(
255 "00changelog.i",
256 "00changelog.i",
256 '\0\0\0\2' # represents revlogv2
257 '\0\0\0\2' # represents revlogv2
257 ' dummy changelog to prevent using the old repo layout'
258 ' dummy changelog to prevent using the old repo layout'
258 )
259 )
259 if self.ui.configbool('format', 'generaldelta', False):
260 if self.ui.configbool('format', 'generaldelta', False):
260 requirements.add("generaldelta")
261 self.requirements.add("generaldelta")
261 if self.ui.configbool('experimental', 'manifestv2', False):
262 if self.ui.configbool('experimental', 'manifestv2', False):
262 requirements.add("manifestv2")
263 self.requirements.add("manifestv2")
263 else:
264 else:
264 raise error.RepoError(_("repository %s not found") % path)
265 raise error.RepoError(_("repository %s not found") % path)
265 elif create:
266 elif create:
266 raise error.RepoError(_("repository %s already exists") % path)
267 raise error.RepoError(_("repository %s already exists") % path)
267 else:
268 else:
268 try:
269 try:
269 requirements = scmutil.readrequires(self.vfs, self.supported)
270 self.requirements = scmutil.readrequires(
271 self.vfs, self.supported)
270 except IOError, inst:
272 except IOError, inst:
271 if inst.errno != errno.ENOENT:
273 if inst.errno != errno.ENOENT:
272 raise
274 raise
273 requirements = set()
274
275
275 self.sharedpath = self.path
276 self.sharedpath = self.path
276 try:
277 try:
277 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
278 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
278 realpath=True)
279 realpath=True)
279 s = vfs.base
280 s = vfs.base
280 if not vfs.exists():
281 if not vfs.exists():
281 raise error.RepoError(
282 raise error.RepoError(
282 _('.hg/sharedpath points to nonexistent directory %s') % s)
283 _('.hg/sharedpath points to nonexistent directory %s') % s)
283 self.sharedpath = s
284 self.sharedpath = s
284 except IOError, inst:
285 except IOError, inst:
285 if inst.errno != errno.ENOENT:
286 if inst.errno != errno.ENOENT:
286 raise
287 raise
287
288
288 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
289 self.store = store.store(
290 self.requirements, self.sharedpath, scmutil.vfs)
289 self.spath = self.store.path
291 self.spath = self.store.path
290 self.svfs = self.store.vfs
292 self.svfs = self.store.vfs
291 self.sopener = self.svfs
293 self.sopener = self.svfs
292 self.sjoin = self.store.join
294 self.sjoin = self.store.join
293 self.vfs.createmode = self.store.createmode
295 self.vfs.createmode = self.store.createmode
294 self.requirements = requirements
295 self._applyopenerreqs()
296 self._applyopenerreqs()
296 if create:
297 if create:
297 self._writerequirements()
298 self._writerequirements()
298
299
299
300
300 self._branchcaches = {}
301 self._branchcaches = {}
301 self._revbranchcache = None
302 self._revbranchcache = None
302 self.filterpats = {}
303 self.filterpats = {}
303 self._datafilters = {}
304 self._datafilters = {}
304 self._transref = self._lockref = self._wlockref = None
305 self._transref = self._lockref = self._wlockref = None
305
306
306 # A cache for various files under .hg/ that tracks file changes,
307 # A cache for various files under .hg/ that tracks file changes,
307 # (used by the filecache decorator)
308 # (used by the filecache decorator)
308 #
309 #
309 # Maps a property name to its util.filecacheentry
310 # Maps a property name to its util.filecacheentry
310 self._filecache = {}
311 self._filecache = {}
311
312
312 # hold sets of revision to be filtered
313 # hold sets of revision to be filtered
313 # should be cleared when something might have changed the filter value:
314 # should be cleared when something might have changed the filter value:
314 # - new changesets,
315 # - new changesets,
315 # - phase change,
316 # - phase change,
316 # - new obsolescence marker,
317 # - new obsolescence marker,
317 # - working directory parent change,
318 # - working directory parent change,
318 # - bookmark changes
319 # - bookmark changes
319 self.filteredrevcache = {}
320 self.filteredrevcache = {}
320
321
321 # generic mapping between names and nodes
322 # generic mapping between names and nodes
322 self.names = namespaces.namespaces()
323 self.names = namespaces.namespaces()
323
324
324 def close(self):
325 def close(self):
325 self._writecaches()
326 self._writecaches()
326
327
327 def _writecaches(self):
328 def _writecaches(self):
328 if self._revbranchcache:
329 if self._revbranchcache:
329 self._revbranchcache.write()
330 self._revbranchcache.write()
330
331
331 def _restrictcapabilities(self, caps):
332 def _restrictcapabilities(self, caps):
332 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 caps = set(caps)
334 caps = set(caps)
334 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 caps.add('bundle2=' + urllib.quote(capsblob))
336 caps.add('bundle2=' + urllib.quote(capsblob))
336 return caps
337 return caps
337
338
338 def _applyopenerreqs(self):
339 def _applyopenerreqs(self):
339 self.svfs.options = dict((r, 1) for r in self.requirements
340 self.svfs.options = dict((r, 1) for r in self.requirements
340 if r in self.openerreqs)
341 if r in self.openerreqs)
341 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
342 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
342 if chunkcachesize is not None:
343 if chunkcachesize is not None:
343 self.svfs.options['chunkcachesize'] = chunkcachesize
344 self.svfs.options['chunkcachesize'] = chunkcachesize
344 maxchainlen = self.ui.configint('format', 'maxchainlen')
345 maxchainlen = self.ui.configint('format', 'maxchainlen')
345 if maxchainlen is not None:
346 if maxchainlen is not None:
346 self.svfs.options['maxchainlen'] = maxchainlen
347 self.svfs.options['maxchainlen'] = maxchainlen
347 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
348 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
348 if manifestcachesize is not None:
349 if manifestcachesize is not None:
349 self.svfs.options['manifestcachesize'] = manifestcachesize
350 self.svfs.options['manifestcachesize'] = manifestcachesize
350 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
351 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
351 if usetreemanifest is not None:
352 if usetreemanifest is not None:
352 self.svfs.options['usetreemanifest'] = usetreemanifest
353 self.svfs.options['usetreemanifest'] = usetreemanifest
353
354
354 def _writerequirements(self):
355 def _writerequirements(self):
355 reqfile = self.vfs("requires", "w")
356 reqfile = self.vfs("requires", "w")
356 for r in sorted(self.requirements):
357 for r in sorted(self.requirements):
357 reqfile.write("%s\n" % r)
358 reqfile.write("%s\n" % r)
358 reqfile.close()
359 reqfile.close()
359
360
360 def _checknested(self, path):
361 def _checknested(self, path):
361 """Determine if path is a legal nested repository."""
362 """Determine if path is a legal nested repository."""
362 if not path.startswith(self.root):
363 if not path.startswith(self.root):
363 return False
364 return False
364 subpath = path[len(self.root) + 1:]
365 subpath = path[len(self.root) + 1:]
365 normsubpath = util.pconvert(subpath)
366 normsubpath = util.pconvert(subpath)
366
367
367 # XXX: Checking against the current working copy is wrong in
368 # XXX: Checking against the current working copy is wrong in
368 # the sense that it can reject things like
369 # the sense that it can reject things like
369 #
370 #
370 # $ hg cat -r 10 sub/x.txt
371 # $ hg cat -r 10 sub/x.txt
371 #
372 #
372 # if sub/ is no longer a subrepository in the working copy
373 # if sub/ is no longer a subrepository in the working copy
373 # parent revision.
374 # parent revision.
374 #
375 #
375 # However, it can of course also allow things that would have
376 # However, it can of course also allow things that would have
376 # been rejected before, such as the above cat command if sub/
377 # been rejected before, such as the above cat command if sub/
377 # is a subrepository now, but was a normal directory before.
378 # is a subrepository now, but was a normal directory before.
378 # The old path auditor would have rejected by mistake since it
379 # The old path auditor would have rejected by mistake since it
379 # panics when it sees sub/.hg/.
380 # panics when it sees sub/.hg/.
380 #
381 #
381 # All in all, checking against the working copy seems sensible
382 # All in all, checking against the working copy seems sensible
382 # since we want to prevent access to nested repositories on
383 # since we want to prevent access to nested repositories on
383 # the filesystem *now*.
384 # the filesystem *now*.
384 ctx = self[None]
385 ctx = self[None]
385 parts = util.splitpath(subpath)
386 parts = util.splitpath(subpath)
386 while parts:
387 while parts:
387 prefix = '/'.join(parts)
388 prefix = '/'.join(parts)
388 if prefix in ctx.substate:
389 if prefix in ctx.substate:
389 if prefix == normsubpath:
390 if prefix == normsubpath:
390 return True
391 return True
391 else:
392 else:
392 sub = ctx.sub(prefix)
393 sub = ctx.sub(prefix)
393 return sub.checknested(subpath[len(prefix) + 1:])
394 return sub.checknested(subpath[len(prefix) + 1:])
394 else:
395 else:
395 parts.pop()
396 parts.pop()
396 return False
397 return False
397
398
398 def peer(self):
399 def peer(self):
399 return localpeer(self) # not cached to avoid reference cycle
400 return localpeer(self) # not cached to avoid reference cycle
400
401
401 def unfiltered(self):
402 def unfiltered(self):
402 """Return unfiltered version of the repository
403 """Return unfiltered version of the repository
403
404
404 Intended to be overwritten by filtered repo."""
405 Intended to be overwritten by filtered repo."""
405 return self
406 return self
406
407
407 def filtered(self, name):
408 def filtered(self, name):
408 """Return a filtered version of a repository"""
409 """Return a filtered version of a repository"""
409 # build a new class with the mixin and the current class
410 # build a new class with the mixin and the current class
410 # (possibly subclass of the repo)
411 # (possibly subclass of the repo)
411 class proxycls(repoview.repoview, self.unfiltered().__class__):
412 class proxycls(repoview.repoview, self.unfiltered().__class__):
412 pass
413 pass
413 return proxycls(self, name)
414 return proxycls(self, name)
414
415
415 @repofilecache('bookmarks')
416 @repofilecache('bookmarks')
416 def _bookmarks(self):
417 def _bookmarks(self):
417 return bookmarks.bmstore(self)
418 return bookmarks.bmstore(self)
418
419
419 @repofilecache('bookmarks.current')
420 @repofilecache('bookmarks.current')
420 def _bookmarkcurrent(self):
421 def _bookmarkcurrent(self):
421 return bookmarks.readcurrent(self)
422 return bookmarks.readcurrent(self)
422
423
423 def bookmarkheads(self, bookmark):
424 def bookmarkheads(self, bookmark):
424 name = bookmark.split('@', 1)[0]
425 name = bookmark.split('@', 1)[0]
425 heads = []
426 heads = []
426 for mark, n in self._bookmarks.iteritems():
427 for mark, n in self._bookmarks.iteritems():
427 if mark.split('@', 1)[0] == name:
428 if mark.split('@', 1)[0] == name:
428 heads.append(n)
429 heads.append(n)
429 return heads
430 return heads
430
431
431 @storecache('phaseroots')
432 @storecache('phaseroots')
432 def _phasecache(self):
433 def _phasecache(self):
433 return phases.phasecache(self, self._phasedefaults)
434 return phases.phasecache(self, self._phasedefaults)
434
435
435 @storecache('obsstore')
436 @storecache('obsstore')
436 def obsstore(self):
437 def obsstore(self):
437 # read default format for new obsstore.
438 # read default format for new obsstore.
438 defaultformat = self.ui.configint('format', 'obsstore-version', None)
439 defaultformat = self.ui.configint('format', 'obsstore-version', None)
439 # rely on obsstore class default when possible.
440 # rely on obsstore class default when possible.
440 kwargs = {}
441 kwargs = {}
441 if defaultformat is not None:
442 if defaultformat is not None:
442 kwargs['defaultformat'] = defaultformat
443 kwargs['defaultformat'] = defaultformat
443 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
444 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
444 store = obsolete.obsstore(self.svfs, readonly=readonly,
445 store = obsolete.obsstore(self.svfs, readonly=readonly,
445 **kwargs)
446 **kwargs)
446 if store and readonly:
447 if store and readonly:
447 self.ui.warn(
448 self.ui.warn(
448 _('obsolete feature not enabled but %i markers found!\n')
449 _('obsolete feature not enabled but %i markers found!\n')
449 % len(list(store)))
450 % len(list(store)))
450 return store
451 return store
451
452
452 @storecache('00changelog.i')
453 @storecache('00changelog.i')
453 def changelog(self):
454 def changelog(self):
454 c = changelog.changelog(self.svfs)
455 c = changelog.changelog(self.svfs)
455 if 'HG_PENDING' in os.environ:
456 if 'HG_PENDING' in os.environ:
456 p = os.environ['HG_PENDING']
457 p = os.environ['HG_PENDING']
457 if p.startswith(self.root):
458 if p.startswith(self.root):
458 c.readpending('00changelog.i.a')
459 c.readpending('00changelog.i.a')
459 return c
460 return c
460
461
461 @storecache('00manifest.i')
462 @storecache('00manifest.i')
462 def manifest(self):
463 def manifest(self):
463 return manifest.manifest(self.svfs)
464 return manifest.manifest(self.svfs)
464
465
465 @repofilecache('dirstate')
466 @repofilecache('dirstate')
466 def dirstate(self):
467 def dirstate(self):
467 warned = [0]
468 warned = [0]
468 def validate(node):
469 def validate(node):
469 try:
470 try:
470 self.changelog.rev(node)
471 self.changelog.rev(node)
471 return node
472 return node
472 except error.LookupError:
473 except error.LookupError:
473 if not warned[0]:
474 if not warned[0]:
474 warned[0] = True
475 warned[0] = True
475 self.ui.warn(_("warning: ignoring unknown"
476 self.ui.warn(_("warning: ignoring unknown"
476 " working parent %s!\n") % short(node))
477 " working parent %s!\n") % short(node))
477 return nullid
478 return nullid
478
479
479 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480
481
481 def __getitem__(self, changeid):
482 def __getitem__(self, changeid):
482 if changeid is None:
483 if changeid is None:
483 return context.workingctx(self)
484 return context.workingctx(self)
484 if isinstance(changeid, slice):
485 if isinstance(changeid, slice):
485 return [context.changectx(self, i)
486 return [context.changectx(self, i)
486 for i in xrange(*changeid.indices(len(self)))
487 for i in xrange(*changeid.indices(len(self)))
487 if i not in self.changelog.filteredrevs]
488 if i not in self.changelog.filteredrevs]
488 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
489
490
490 def __contains__(self, changeid):
491 def __contains__(self, changeid):
491 try:
492 try:
492 self[changeid]
493 self[changeid]
493 return True
494 return True
494 except error.RepoLookupError:
495 except error.RepoLookupError:
495 return False
496 return False
496
497
497 def __nonzero__(self):
498 def __nonzero__(self):
498 return True
499 return True
499
500
500 def __len__(self):
501 def __len__(self):
501 return len(self.changelog)
502 return len(self.changelog)
502
503
503 def __iter__(self):
504 def __iter__(self):
504 return iter(self.changelog)
505 return iter(self.changelog)
505
506
506 def revs(self, expr, *args):
507 def revs(self, expr, *args):
507 '''Return a list of revisions matching the given revset'''
508 '''Return a list of revisions matching the given revset'''
508 expr = revset.formatspec(expr, *args)
509 expr = revset.formatspec(expr, *args)
509 m = revset.match(None, expr)
510 m = revset.match(None, expr)
510 return m(self)
511 return m(self)
511
512
512 def set(self, expr, *args):
513 def set(self, expr, *args):
513 '''
514 '''
514 Yield a context for each matching revision, after doing arg
515 Yield a context for each matching revision, after doing arg
515 replacement via revset.formatspec
516 replacement via revset.formatspec
516 '''
517 '''
517 for r in self.revs(expr, *args):
518 for r in self.revs(expr, *args):
518 yield self[r]
519 yield self[r]
519
520
520 def url(self):
521 def url(self):
521 return 'file:' + self.root
522 return 'file:' + self.root
522
523
523 def hook(self, name, throw=False, **args):
524 def hook(self, name, throw=False, **args):
524 """Call a hook, passing this repo instance.
525 """Call a hook, passing this repo instance.
525
526
526 This a convenience method to aid invoking hooks. Extensions likely
527 This a convenience method to aid invoking hooks. Extensions likely
527 won't call this unless they have registered a custom hook or are
528 won't call this unless they have registered a custom hook or are
528 replacing code that is expected to call a hook.
529 replacing code that is expected to call a hook.
529 """
530 """
530 return hook.hook(self.ui, self, name, throw, **args)
531 return hook.hook(self.ui, self, name, throw, **args)
531
532
532 @unfilteredmethod
533 @unfilteredmethod
533 def _tag(self, names, node, message, local, user, date, extra={},
534 def _tag(self, names, node, message, local, user, date, extra={},
534 editor=False):
535 editor=False):
535 if isinstance(names, str):
536 if isinstance(names, str):
536 names = (names,)
537 names = (names,)
537
538
538 branches = self.branchmap()
539 branches = self.branchmap()
539 for name in names:
540 for name in names:
540 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 local=local)
542 local=local)
542 if name in branches:
543 if name in branches:
543 self.ui.warn(_("warning: tag %s conflicts with existing"
544 self.ui.warn(_("warning: tag %s conflicts with existing"
544 " branch name\n") % name)
545 " branch name\n") % name)
545
546
546 def writetags(fp, names, munge, prevtags):
547 def writetags(fp, names, munge, prevtags):
547 fp.seek(0, 2)
548 fp.seek(0, 2)
548 if prevtags and prevtags[-1] != '\n':
549 if prevtags and prevtags[-1] != '\n':
549 fp.write('\n')
550 fp.write('\n')
550 for name in names:
551 for name in names:
551 if munge:
552 if munge:
552 m = munge(name)
553 m = munge(name)
553 else:
554 else:
554 m = name
555 m = name
555
556
556 if (self._tagscache.tagtypes and
557 if (self._tagscache.tagtypes and
557 name in self._tagscache.tagtypes):
558 name in self._tagscache.tagtypes):
558 old = self.tags().get(name, nullid)
559 old = self.tags().get(name, nullid)
559 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(node), m))
561 fp.write('%s %s\n' % (hex(node), m))
561 fp.close()
562 fp.close()
562
563
563 prevtags = ''
564 prevtags = ''
564 if local:
565 if local:
565 try:
566 try:
566 fp = self.vfs('localtags', 'r+')
567 fp = self.vfs('localtags', 'r+')
567 except IOError:
568 except IOError:
568 fp = self.vfs('localtags', 'a')
569 fp = self.vfs('localtags', 'a')
569 else:
570 else:
570 prevtags = fp.read()
571 prevtags = fp.read()
571
572
572 # local tags are stored in the current charset
573 # local tags are stored in the current charset
573 writetags(fp, names, None, prevtags)
574 writetags(fp, names, None, prevtags)
574 for name in names:
575 for name in names:
575 self.hook('tag', node=hex(node), tag=name, local=local)
576 self.hook('tag', node=hex(node), tag=name, local=local)
576 return
577 return
577
578
578 try:
579 try:
579 fp = self.wfile('.hgtags', 'rb+')
580 fp = self.wfile('.hgtags', 'rb+')
580 except IOError, e:
581 except IOError, e:
581 if e.errno != errno.ENOENT:
582 if e.errno != errno.ENOENT:
582 raise
583 raise
583 fp = self.wfile('.hgtags', 'ab')
584 fp = self.wfile('.hgtags', 'ab')
584 else:
585 else:
585 prevtags = fp.read()
586 prevtags = fp.read()
586
587
587 # committed tags are stored in UTF-8
588 # committed tags are stored in UTF-8
588 writetags(fp, names, encoding.fromlocal, prevtags)
589 writetags(fp, names, encoding.fromlocal, prevtags)
589
590
590 fp.close()
591 fp.close()
591
592
592 self.invalidatecaches()
593 self.invalidatecaches()
593
594
594 if '.hgtags' not in self.dirstate:
595 if '.hgtags' not in self.dirstate:
595 self[None].add(['.hgtags'])
596 self[None].add(['.hgtags'])
596
597
597 m = matchmod.exact(self.root, '', ['.hgtags'])
598 m = matchmod.exact(self.root, '', ['.hgtags'])
598 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 editor=editor)
600 editor=editor)
600
601
601 for name in names:
602 for name in names:
602 self.hook('tag', node=hex(node), tag=name, local=local)
603 self.hook('tag', node=hex(node), tag=name, local=local)
603
604
604 return tagnode
605 return tagnode
605
606
606 def tag(self, names, node, message, local, user, date, editor=False):
607 def tag(self, names, node, message, local, user, date, editor=False):
607 '''tag a revision with one or more symbolic names.
608 '''tag a revision with one or more symbolic names.
608
609
609 names is a list of strings or, when adding a single tag, names may be a
610 names is a list of strings or, when adding a single tag, names may be a
610 string.
611 string.
611
612
612 if local is True, the tags are stored in a per-repository file.
613 if local is True, the tags are stored in a per-repository file.
613 otherwise, they are stored in the .hgtags file, and a new
614 otherwise, they are stored in the .hgtags file, and a new
614 changeset is committed with the change.
615 changeset is committed with the change.
615
616
616 keyword arguments:
617 keyword arguments:
617
618
618 local: whether to store tags in non-version-controlled file
619 local: whether to store tags in non-version-controlled file
619 (default False)
620 (default False)
620
621
621 message: commit message to use if committing
622 message: commit message to use if committing
622
623
623 user: name of user to use if committing
624 user: name of user to use if committing
624
625
625 date: date tuple to use if committing'''
626 date: date tuple to use if committing'''
626
627
627 if not local:
628 if not local:
628 m = matchmod.exact(self.root, '', ['.hgtags'])
629 m = matchmod.exact(self.root, '', ['.hgtags'])
629 if util.any(self.status(match=m, unknown=True, ignored=True)):
630 if util.any(self.status(match=m, unknown=True, ignored=True)):
630 raise util.Abort(_('working copy of .hgtags is changed'),
631 raise util.Abort(_('working copy of .hgtags is changed'),
631 hint=_('please commit .hgtags manually'))
632 hint=_('please commit .hgtags manually'))
632
633
633 self.tags() # instantiate the cache
634 self.tags() # instantiate the cache
634 self._tag(names, node, message, local, user, date, editor=editor)
635 self._tag(names, node, message, local, user, date, editor=editor)
635
636
636 @filteredpropertycache
637 @filteredpropertycache
637 def _tagscache(self):
638 def _tagscache(self):
638 '''Returns a tagscache object that contains various tags related
639 '''Returns a tagscache object that contains various tags related
639 caches.'''
640 caches.'''
640
641
641 # This simplifies its cache management by having one decorated
642 # This simplifies its cache management by having one decorated
642 # function (this one) and the rest simply fetch things from it.
643 # function (this one) and the rest simply fetch things from it.
643 class tagscache(object):
644 class tagscache(object):
644 def __init__(self):
645 def __init__(self):
645 # These two define the set of tags for this repository. tags
646 # These two define the set of tags for this repository. tags
646 # maps tag name to node; tagtypes maps tag name to 'global' or
647 # maps tag name to node; tagtypes maps tag name to 'global' or
647 # 'local'. (Global tags are defined by .hgtags across all
648 # 'local'. (Global tags are defined by .hgtags across all
648 # heads, and local tags are defined in .hg/localtags.)
649 # heads, and local tags are defined in .hg/localtags.)
649 # They constitute the in-memory cache of tags.
650 # They constitute the in-memory cache of tags.
650 self.tags = self.tagtypes = None
651 self.tags = self.tagtypes = None
651
652
652 self.nodetagscache = self.tagslist = None
653 self.nodetagscache = self.tagslist = None
653
654
654 cache = tagscache()
655 cache = tagscache()
655 cache.tags, cache.tagtypes = self._findtags()
656 cache.tags, cache.tagtypes = self._findtags()
656
657
657 return cache
658 return cache
658
659
659 def tags(self):
660 def tags(self):
660 '''return a mapping of tag to node'''
661 '''return a mapping of tag to node'''
661 t = {}
662 t = {}
662 if self.changelog.filteredrevs:
663 if self.changelog.filteredrevs:
663 tags, tt = self._findtags()
664 tags, tt = self._findtags()
664 else:
665 else:
665 tags = self._tagscache.tags
666 tags = self._tagscache.tags
666 for k, v in tags.iteritems():
667 for k, v in tags.iteritems():
667 try:
668 try:
668 # ignore tags to unknown nodes
669 # ignore tags to unknown nodes
669 self.changelog.rev(v)
670 self.changelog.rev(v)
670 t[k] = v
671 t[k] = v
671 except (error.LookupError, ValueError):
672 except (error.LookupError, ValueError):
672 pass
673 pass
673 return t
674 return t
674
675
675 def _findtags(self):
676 def _findtags(self):
676 '''Do the hard work of finding tags. Return a pair of dicts
677 '''Do the hard work of finding tags. Return a pair of dicts
677 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 maps tag name to a string like \'global\' or \'local\'.
679 maps tag name to a string like \'global\' or \'local\'.
679 Subclasses or extensions are free to add their own tags, but
680 Subclasses or extensions are free to add their own tags, but
680 should be aware that the returned dicts will be retained for the
681 should be aware that the returned dicts will be retained for the
681 duration of the localrepo object.'''
682 duration of the localrepo object.'''
682
683
683 # XXX what tagtype should subclasses/extensions use? Currently
684 # XXX what tagtype should subclasses/extensions use? Currently
684 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # Should each extension invent its own tag type? Should there
686 # Should each extension invent its own tag type? Should there
686 # be one tagtype for all such "virtual" tags? Or is the status
687 # be one tagtype for all such "virtual" tags? Or is the status
687 # quo fine?
688 # quo fine?
688
689
689 alltags = {} # map tag name to (node, hist)
690 alltags = {} # map tag name to (node, hist)
690 tagtypes = {}
691 tagtypes = {}
691
692
692 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694
695
695 # Build the return dicts. Have to re-encode tag names because
696 # Build the return dicts. Have to re-encode tag names because
696 # the tags module always uses UTF-8 (in order not to lose info
697 # the tags module always uses UTF-8 (in order not to lose info
697 # writing to the cache), but the rest of Mercurial wants them in
698 # writing to the cache), but the rest of Mercurial wants them in
698 # local encoding.
699 # local encoding.
699 tags = {}
700 tags = {}
700 for (name, (node, hist)) in alltags.iteritems():
701 for (name, (node, hist)) in alltags.iteritems():
701 if node != nullid:
702 if node != nullid:
702 tags[encoding.tolocal(name)] = node
703 tags[encoding.tolocal(name)] = node
703 tags['tip'] = self.changelog.tip()
704 tags['tip'] = self.changelog.tip()
704 tagtypes = dict([(encoding.tolocal(name), value)
705 tagtypes = dict([(encoding.tolocal(name), value)
705 for (name, value) in tagtypes.iteritems()])
706 for (name, value) in tagtypes.iteritems()])
706 return (tags, tagtypes)
707 return (tags, tagtypes)
707
708
708 def tagtype(self, tagname):
709 def tagtype(self, tagname):
709 '''
710 '''
710 return the type of the given tag. result can be:
711 return the type of the given tag. result can be:
711
712
712 'local' : a local tag
713 'local' : a local tag
713 'global' : a global tag
714 'global' : a global tag
714 None : tag does not exist
715 None : tag does not exist
715 '''
716 '''
716
717
717 return self._tagscache.tagtypes.get(tagname)
718 return self._tagscache.tagtypes.get(tagname)
718
719
719 def tagslist(self):
720 def tagslist(self):
720 '''return a list of tags ordered by revision'''
721 '''return a list of tags ordered by revision'''
721 if not self._tagscache.tagslist:
722 if not self._tagscache.tagslist:
722 l = []
723 l = []
723 for t, n in self.tags().iteritems():
724 for t, n in self.tags().iteritems():
724 l.append((self.changelog.rev(n), t, n))
725 l.append((self.changelog.rev(n), t, n))
725 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726
727
727 return self._tagscache.tagslist
728 return self._tagscache.tagslist
728
729
729 def nodetags(self, node):
730 def nodetags(self, node):
730 '''return the tags associated with a node'''
731 '''return the tags associated with a node'''
731 if not self._tagscache.nodetagscache:
732 if not self._tagscache.nodetagscache:
732 nodetagscache = {}
733 nodetagscache = {}
733 for t, n in self._tagscache.tags.iteritems():
734 for t, n in self._tagscache.tags.iteritems():
734 nodetagscache.setdefault(n, []).append(t)
735 nodetagscache.setdefault(n, []).append(t)
735 for tags in nodetagscache.itervalues():
736 for tags in nodetagscache.itervalues():
736 tags.sort()
737 tags.sort()
737 self._tagscache.nodetagscache = nodetagscache
738 self._tagscache.nodetagscache = nodetagscache
738 return self._tagscache.nodetagscache.get(node, [])
739 return self._tagscache.nodetagscache.get(node, [])
739
740
740 def nodebookmarks(self, node):
741 def nodebookmarks(self, node):
741 marks = []
742 marks = []
742 for bookmark, n in self._bookmarks.iteritems():
743 for bookmark, n in self._bookmarks.iteritems():
743 if n == node:
744 if n == node:
744 marks.append(bookmark)
745 marks.append(bookmark)
745 return sorted(marks)
746 return sorted(marks)
746
747
747 def branchmap(self):
748 def branchmap(self):
748 '''returns a dictionary {branch: [branchheads]} with branchheads
749 '''returns a dictionary {branch: [branchheads]} with branchheads
749 ordered by increasing revision number'''
750 ordered by increasing revision number'''
750 branchmap.updatecache(self)
751 branchmap.updatecache(self)
751 return self._branchcaches[self.filtername]
752 return self._branchcaches[self.filtername]
752
753
753 @unfilteredmethod
754 @unfilteredmethod
754 def revbranchcache(self):
755 def revbranchcache(self):
755 if not self._revbranchcache:
756 if not self._revbranchcache:
756 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 return self._revbranchcache
758 return self._revbranchcache
758
759
759 def branchtip(self, branch, ignoremissing=False):
760 def branchtip(self, branch, ignoremissing=False):
760 '''return the tip node for a given branch
761 '''return the tip node for a given branch
761
762
762 If ignoremissing is True, then this method will not raise an error.
763 If ignoremissing is True, then this method will not raise an error.
763 This is helpful for callers that only expect None for a missing branch
764 This is helpful for callers that only expect None for a missing branch
764 (e.g. namespace).
765 (e.g. namespace).
765
766
766 '''
767 '''
767 try:
768 try:
768 return self.branchmap().branchtip(branch)
769 return self.branchmap().branchtip(branch)
769 except KeyError:
770 except KeyError:
770 if not ignoremissing:
771 if not ignoremissing:
771 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 else:
773 else:
773 pass
774 pass
774
775
775 def lookup(self, key):
776 def lookup(self, key):
776 return self[key].node()
777 return self[key].node()
777
778
778 def lookupbranch(self, key, remote=None):
779 def lookupbranch(self, key, remote=None):
779 repo = remote or self
780 repo = remote or self
780 if key in repo.branchmap():
781 if key in repo.branchmap():
781 return key
782 return key
782
783
783 repo = (remote and remote.local()) and remote or self
784 repo = (remote and remote.local()) and remote or self
784 return repo[key].branch()
785 return repo[key].branch()
785
786
786 def known(self, nodes):
787 def known(self, nodes):
787 nm = self.changelog.nodemap
788 nm = self.changelog.nodemap
788 pc = self._phasecache
789 pc = self._phasecache
789 result = []
790 result = []
790 for n in nodes:
791 for n in nodes:
791 r = nm.get(n)
792 r = nm.get(n)
792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 result.append(resp)
794 result.append(resp)
794 return result
795 return result
795
796
796 def local(self):
797 def local(self):
797 return self
798 return self
798
799
799 def cancopy(self):
800 def cancopy(self):
800 # so statichttprepo's override of local() works
801 # so statichttprepo's override of local() works
801 if not self.local():
802 if not self.local():
802 return False
803 return False
803 if not self.ui.configbool('phases', 'publish', True):
804 if not self.ui.configbool('phases', 'publish', True):
804 return True
805 return True
805 # if publishing we can't copy if there is filtered content
806 # if publishing we can't copy if there is filtered content
806 return not self.filtered('visible').changelog.filteredrevs
807 return not self.filtered('visible').changelog.filteredrevs
807
808
808 def shared(self):
809 def shared(self):
809 '''the type of shared repository (None if not shared)'''
810 '''the type of shared repository (None if not shared)'''
810 if self.sharedpath != self.path:
811 if self.sharedpath != self.path:
811 return 'store'
812 return 'store'
812 return None
813 return None
813
814
814 def join(self, f, *insidef):
815 def join(self, f, *insidef):
815 return self.vfs.join(os.path.join(f, *insidef))
816 return self.vfs.join(os.path.join(f, *insidef))
816
817
817 def wjoin(self, f, *insidef):
818 def wjoin(self, f, *insidef):
818 return self.vfs.reljoin(self.root, f, *insidef)
819 return self.vfs.reljoin(self.root, f, *insidef)
819
820
820 def file(self, f):
821 def file(self, f):
821 if f[0] == '/':
822 if f[0] == '/':
822 f = f[1:]
823 f = f[1:]
823 return filelog.filelog(self.svfs, f)
824 return filelog.filelog(self.svfs, f)
824
825
825 def changectx(self, changeid):
826 def changectx(self, changeid):
826 return self[changeid]
827 return self[changeid]
827
828
828 def parents(self, changeid=None):
829 def parents(self, changeid=None):
829 '''get list of changectxs for parents of changeid'''
830 '''get list of changectxs for parents of changeid'''
830 return self[changeid].parents()
831 return self[changeid].parents()
831
832
832 def setparents(self, p1, p2=nullid):
833 def setparents(self, p1, p2=nullid):
833 self.dirstate.beginparentchange()
834 self.dirstate.beginparentchange()
834 copies = self.dirstate.setparents(p1, p2)
835 copies = self.dirstate.setparents(p1, p2)
835 pctx = self[p1]
836 pctx = self[p1]
836 if copies:
837 if copies:
837 # Adjust copy records, the dirstate cannot do it, it
838 # Adjust copy records, the dirstate cannot do it, it
838 # requires access to parents manifests. Preserve them
839 # requires access to parents manifests. Preserve them
839 # only for entries added to first parent.
840 # only for entries added to first parent.
840 for f in copies:
841 for f in copies:
841 if f not in pctx and copies[f] in pctx:
842 if f not in pctx and copies[f] in pctx:
842 self.dirstate.copy(copies[f], f)
843 self.dirstate.copy(copies[f], f)
843 if p2 == nullid:
844 if p2 == nullid:
844 for f, s in sorted(self.dirstate.copies().items()):
845 for f, s in sorted(self.dirstate.copies().items()):
845 if f not in pctx and s not in pctx:
846 if f not in pctx and s not in pctx:
846 self.dirstate.copy(None, f)
847 self.dirstate.copy(None, f)
847 self.dirstate.endparentchange()
848 self.dirstate.endparentchange()
848
849
849 def filectx(self, path, changeid=None, fileid=None):
850 def filectx(self, path, changeid=None, fileid=None):
850 """changeid can be a changeset revision, node, or tag.
851 """changeid can be a changeset revision, node, or tag.
851 fileid can be a file revision or node."""
852 fileid can be a file revision or node."""
852 return context.filectx(self, path, changeid, fileid)
853 return context.filectx(self, path, changeid, fileid)
853
854
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

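    # Editor's sketch of the filter machinery (hypothetical names, not part
    # of the original file): adddatafilter() registers a named in-process
    # filter that _loadfilter() prefers over shelling out via util.filter()
    # whenever a configured command starts with that name. For example,
    # with "[encode] **.txt = mycrlf" in hgrc:
    #
    #     def mycrlf(s, params, ui=None, repo=None, filename=None):
    #         return s.replace('\r\n', '\n')
    #     repo.adddatafilter('mycrlf', mycrlf)
    #     repo.wread('doc.txt')   # runs mycrlf over the file's data
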
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

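    # Round-trip sketch (editor's example, not in the original file):
    # wread() applies the 'encode' filters on the way out of the working
    # directory; wwrite() applies the 'decode' filters on the way back in:
    #
    #     data = repo.wread('doc.txt')          # filtered for the store
    #     repo.wwrite('doc.txt', data, '')      # flags '' = regular file
    #     repo.wwrite('run.sh', data, 'x')      # 'x' sets the exec bit
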
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if the transaction is aborted.
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

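    # Typical caller pattern (editor's sketch, not part of the original
    # file); the store lock must already be held, and nested calls return
    # tr.nest() so only the outermost close()/release() pair is decisive:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             # ... write to the store ...
    #             tr.close()       # commit the transaction
    #         finally:
    #             tr.release()     # rolls back if close() was not reached
    #     finally:
    #         lock.release()
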
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

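    # Editor's note (not part of the original file): each journal file
    # listed above gets an 'undo' twin via undoname() once the transaction
    # completes, e.g. 'journal.dirstate' becomes 'undo.dirstate';
    # rollback() below feeds on those undo.* files.
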
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

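    # Editor's sketch contrasting the two entry points (not part of the
    # original file): recover() replays the 'journal' of an interrupted
    # transaction, while rollback() undoes the last completed one:
    #
    #     repo.recover()               # -> True if a journal was replayed
    #     repo.rollback(dryrun=True)   # -> 0 if undo information exists
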
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

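    # Usage sketch (editor's example, not in the original file): defer work
    # until every lock is dropped; with no lock held the callback runs
    # immediately:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)
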
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

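    # Lock-ordering sketch (editor's example, not part of the original
    # file): always take wlock before lock, and release in reverse order:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         # ... modify working copy and store ...
    #     finally:
    #         release(lock, wlock)   # release() from the lock module
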
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

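    # Editor's note on the rename metadata above (sketch, not part of the
    # original file): when a copy is recorded, the new filelog revision
    # carries metadata roughly of the form
    #
    #     meta = {'copy': 'oldname', 'copyrev': '<40-hex filelog node>'}
    #
    # with fparent1 set to nullid, which tells readers to consult the copy
    # data instead of the first parent.
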
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

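    # Minimal caller sketch (editor's example, not part of the original
    # file): commit everything modified in the working directory:
    #
    #     node = repo.commit(text='fix frobnication', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
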
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

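    # Usage sketch (editor's example, not in the original file):
    #
    #     repo.branchheads()                       # heads of dirstate branch
    #     repo.branchheads('stable', closed=True)  # include closed heads
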
1708 def branches(self, nodes):
1709 def branches(self, nodes):
1709 if not nodes:
1710 if not nodes:
1710 nodes = [self.changelog.tip()]
1711 nodes = [self.changelog.tip()]
1711 b = []
1712 b = []
1712 for n in nodes:
1713 for n in nodes:
1713 t = n
1714 t = n
1714 while True:
1715 while True:
1715 p = self.changelog.parents(n)
1716 p = self.changelog.parents(n)
1716 if p[1] != nullid or p[0] == nullid:
1717 if p[1] != nullid or p[0] == nullid:
1717 b.append((t, n, p[0], p[1]))
1718 b.append((t, n, p[0], p[1]))
1718 break
1719 break
1719 n = p[0]
1720 n = p[0]
1720 return b
1721 return b
1721
1722
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

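    # Worked example (added for clarity, not in the original source): for a
    # linear history bottom <- x1 <- x2 <- x3 <- x4 <- top, the loop walks
    # top's first-parent chain and records the ancestors at distances
    # 1, 2, 4, 8, ... from top, so between([(top, bottom)]) returns
    # [[x4, x3, x1]]: an exponentially thinning sample that lets the wire
    # protocol bisect towards common ancestors with O(log n) entries.
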
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

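    # Sketch of an extension overriding checkpush (assumed extension code,
    # not part of this module):
    #
    #   def reposetup(ui, repo):
    #       class vetorepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(vetorepo, self).checkpush(pushop)
    #               if pushop.force:
    #                   raise util.Abort('forced pushes are disabled here')
    #       repo.__class__ = vetorepo
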
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

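    # Illustrative registration (assumed code): an extension can append a
    # callback with e.g.
    #   repo.prepushoutgoinghooks.add('myext', myhook)
    # where myhook(repo, remote, outgoing) runs before the changesets in
    # 'outgoing' are pushed and may raise to abort the push.
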
    def stream_in(self, remote, remotereqs):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

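            # Shape of the stream, paraphrased from the parsing below (the
            # server-side wire protocol code is authoritative):
            #   <status>\n                 0 = ok, 1 = forbidden, 2 = lock failed
            #   <filecount> <bytecount>\n
            #   then per file: <name>\0<size>\n followed by <size> raw bytes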
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

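            # Illustrative set arithmetic (example values, not from the
            # source): with self.requirements = set(['revlogv1', 'store',
            # 'fncache', 'dotencode']) and remotereqs = set(['revlogv1',
            # 'generaldelta']), the format entries are taken from the remote
            # while the local non-format entries ('store', 'fncache',
            # 'dotencode') are preserved.
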
            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

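        # Example negotiation (illustrative capability value): a server
        # advertising 'streamreqs=revlogv1,generaldelta' is only streamed
        # from when that whole set is contained in self.supportedformats;
        # otherwise the code falls through to the regular pull below.
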
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

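    # Illustrative call (assumed usage, not in the original source):
    # bookmarks travel through this generic interface, roughly as
    #   repo.pushkey('bookmarks', 'stable', '', hex(newnode))
    # with old='' meaning "create"; prepushkey may veto the update, and the
    # pushkey hook itself only fires after the surrounding lock is released.
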
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

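    # e.g. (illustrative) repo.listkeys('bookmarks') returns a
    # {name: hexnode} dict, and repo.listkeys('namespaces') enumerates the
    # pushkey namespaces this repository answers for.
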
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

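# Illustrative pairing (assumed, matching the transaction machinery): the
# callback returned by aftertrans runs once a transaction is closed, renaming
# e.g. (vfs, 'journal', 'undo') entries so the finished journal becomes the
# data consulted by 'hg rollback'.
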
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

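# e.g. undoname('.hg/store/journal') -> '.hg/store/undo'; only the first
# 'journal' in the basename is replaced, and the assert guards against being
# handed a non-journal path.
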
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True