localrepo: rename requirements parameter in stream_in()...
Drew Gottlieb
r24917:71a738a6 default
@@ -1,1971 +1,1971 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
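
# Example (sketch, hypothetical class): how unfilteredmethod is meant to
# be used. Any object exposing unfiltered() can opt a method out of
# repoview filtering; _demorepo is illustrative only.
class _demorepo(object):
    def unfiltered(self):
        return self
    @unfilteredmethod
    def allrevs(self):
        # always executes against the unfiltered object
        return []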

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
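
# Example (sketch): obtaining a peer for a local repository. The imports
# and hg.repository() are the standard entry points; this helper is
# illustrative only and not part of localrepo's API.
def _demopeer(path='.'):
    from mercurial import ui as uimod, hg
    repo = hg.repository(uimod.ui(), path)
    p = repo.peer()            # localpeer over the 'served' view
    return p.url(), p.canpush()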

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = set(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.requirements = requirements
        self._applyopenerreqs()
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
        if usetreemanifest is not None:
            self.svfs.options['usetreemanifest'] = usetreemanifest

    def _writerequirements(self):
        reqfile = self.vfs("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
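
    # Example (sketch): the on-disk result of _writerequirements() for a
    # freshly created repository with default settings -- one requirement
    # per line, sorted:
    #
    #   $ cat .hg/requires
    #   dotencode
    #   fncache
    #   revlogv1
    #   store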

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
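
    def _demofiltered(self):
        # Example (sketch, illustrative only; not part of localrepo's
        # API): how the views above relate. The 'served' view hides
        # secret and hidden changesets, so it can only shrink the repo.
        unfi = self.unfiltered()
        served = self.filtered('served')
        return len(served) <= len(unfi)    # always True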

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
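
    def _demobookmarkheads(self):
        # Example (sketch, illustrative only): bookmarkheads() groups
        # divergent bookmarks by base name, so querying 'foo' also
        # returns the nodes of variants like 'foo@remote'.
        return self.bookmarkheads('foo')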

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
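
    def _demoqueries(self):
        # Example (sketch, illustrative only): the dict-like and revset
        # query interfaces defined above.
        wctx = self[None]          # working directory context
        tip = self['tip']          # changectx by symbolic name
        # %s is escaped by revset.formatspec, so the value is quoted safely
        heads = [self[r] for r in self.revs('head() and branch(%s)',
                                            'default')]
        return wctx, tip, heads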

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
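
    def _demotag(self):
        # Example (sketch, illustrative only): tagging the current tip
        # through the API above; the user and message are made up.
        node = self['tip'].node()
        self.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
                 local=False, user='editor <editor@example.com>', date=None)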

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
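
    def _demotagqueries(self):
        # Example (sketch, illustrative only): the tag query helpers
        # above; 'tip' is always present in the tags() mapping.
        t = self.tags()                # {'tip': node, ...}
        kind = self.tagtype('v1.0')    # 'global', 'local', or None
        ordered = self.tagslist()      # [(name, node), ...] by revision
        return t, kind, ordered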

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
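
    def _demobranches(self):
        # Example (sketch, illustrative only): branch queries built on
        # the branchmap cache above.
        bm = self.branchmap()                 # {branch: [heads]}
        names = sorted(bm)
        tip = self.branchtip('default', ignoremissing=True)
        return names, tip and short(tip)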
785
785
786 def known(self, nodes):
786 def known(self, nodes):
787 nm = self.changelog.nodemap
787 nm = self.changelog.nodemap
788 pc = self._phasecache
788 pc = self._phasecache
789 result = []
789 result = []
790 for n in nodes:
790 for n in nodes:
791 r = nm.get(n)
791 r = nm.get(n)
792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 result.append(resp)
793 result.append(resp)
794 return result
794 return result
795
795
796 def local(self):
796 def local(self):
797 return self
797 return self
798
798
799 def cancopy(self):
799 def cancopy(self):
800 # so statichttprepo's override of local() works
800 # so statichttprepo's override of local() works
801 if not self.local():
801 if not self.local():
802 return False
802 return False
803 if not self.ui.configbool('phases', 'publish', True):
803 if not self.ui.configbool('phases', 'publish', True):
804 return True
804 return True
805 # if publishing we can't copy if there is filtered content
805 # if publishing we can't copy if there is filtered content
806 return not self.filtered('visible').changelog.filteredrevs
806 return not self.filtered('visible').changelog.filteredrevs
807
807
808 def shared(self):
808 def shared(self):
809 '''the type of shared repository (None if not shared)'''
809 '''the type of shared repository (None if not shared)'''
810 if self.sharedpath != self.path:
810 if self.sharedpath != self.path:
811 return 'store'
811 return 'store'
812 return None
812 return None
813
813
814 def join(self, f, *insidef):
814 def join(self, f, *insidef):
815 return self.vfs.join(os.path.join(f, *insidef))
815 return self.vfs.join(os.path.join(f, *insidef))
816
816
817 def wjoin(self, f, *insidef):
817 def wjoin(self, f, *insidef):
818 return self.vfs.reljoin(self.root, f, *insidef)
818 return self.vfs.reljoin(self.root, f, *insidef)
819
819
820 def file(self, f):
820 def file(self, f):
821 if f[0] == '/':
821 if f[0] == '/':
822 f = f[1:]
822 f = f[1:]
823 return filelog.filelog(self.svfs, f)
823 return filelog.filelog(self.svfs, f)
824
824
825 def changectx(self, changeid):
825 def changectx(self, changeid):
826 return self[changeid]
826 return self[changeid]
827
827
828 def parents(self, changeid=None):
828 def parents(self, changeid=None):
829 '''get list of changectxs for parents of changeid'''
829 '''get list of changectxs for parents of changeid'''
830 return self[changeid].parents()
830 return self[changeid].parents()
831
831
832 def setparents(self, p1, p2=nullid):
832 def setparents(self, p1, p2=nullid):
833 self.dirstate.beginparentchange()
833 self.dirstate.beginparentchange()
834 copies = self.dirstate.setparents(p1, p2)
834 copies = self.dirstate.setparents(p1, p2)
835 pctx = self[p1]
835 pctx = self[p1]
836 if copies:
836 if copies:
837 # Adjust copy records, the dirstate cannot do it, it
837 # Adjust copy records, the dirstate cannot do it, it
838 # requires access to parents manifests. Preserve them
838 # requires access to parents manifests. Preserve them
839 # only for entries added to first parent.
839 # only for entries added to first parent.
840 for f in copies:
840 for f in copies:
841 if f not in pctx and copies[f] in pctx:
841 if f not in pctx and copies[f] in pctx:
842 self.dirstate.copy(copies[f], f)
842 self.dirstate.copy(copies[f], f)
843 if p2 == nullid:
843 if p2 == nullid:
844 for f, s in sorted(self.dirstate.copies().items()):
844 for f, s in sorted(self.dirstate.copies().items()):
845 if f not in pctx and s not in pctx:
845 if f not in pctx and s not in pctx:
846 self.dirstate.copy(None, f)
846 self.dirstate.copy(None, f)
847 self.dirstate.endparentchange()
847 self.dirstate.endparentchange()
848
848
849 def filectx(self, path, changeid=None, fileid=None):
849 def filectx(self, path, changeid=None, fileid=None):
850 """changeid can be a changeset revision, node, or tag.
850 """changeid can be a changeset revision, node, or tag.
851 fileid can be a file revision or node."""
851 fileid can be a file revision or node."""
852 return context.filectx(self, path, changeid, fileid)
852 return context.filectx(self, path, changeid, fileid)
853
853
854 def getcwd(self):
854 def getcwd(self):
855 return self.dirstate.getcwd()
855 return self.dirstate.getcwd()
856
856
857 def pathto(self, f, cwd=None):
857 def pathto(self, f, cwd=None):
858 return self.dirstate.pathto(f, cwd)
858 return self.dirstate.pathto(f, cwd)
859
859
860 def wfile(self, f, mode='r'):
860 def wfile(self, f, mode='r'):
861 return self.wvfs(f, mode)
861 return self.wvfs(f, mode)
862
862
863 def _link(self, f):
863 def _link(self, f):
864 return self.wvfs.islink(f)
864 return self.wvfs.islink(f)
865
865
866 def _loadfilter(self, filter):
866 def _loadfilter(self, filter):
867 if filter not in self.filterpats:
867 if filter not in self.filterpats:
868 l = []
868 l = []
869 for pat, cmd in self.ui.configitems(filter):
869 for pat, cmd in self.ui.configitems(filter):
870 if cmd == '!':
870 if cmd == '!':
871 continue
871 continue
872 mf = matchmod.match(self.root, '', [pat])
872 mf = matchmod.match(self.root, '', [pat])
873 fn = None
873 fn = None
874 params = cmd
874 params = cmd
875 for name, filterfn in self._datafilters.iteritems():
875 for name, filterfn in self._datafilters.iteritems():
876 if cmd.startswith(name):
876 if cmd.startswith(name):
877 fn = filterfn
877 fn = filterfn
878 params = cmd[len(name):].lstrip()
878 params = cmd[len(name):].lstrip()
879 break
879 break
880 if not fn:
880 if not fn:
881 fn = lambda s, c, **kwargs: util.filter(s, c)
881 fn = lambda s, c, **kwargs: util.filter(s, c)
882 # Wrap old filters not supporting keyword arguments
882 # Wrap old filters not supporting keyword arguments
883 if not inspect.getargspec(fn)[2]:
883 if not inspect.getargspec(fn)[2]:
884 oldfn = fn
884 oldfn = fn
885 fn = lambda s, c, **kwargs: oldfn(s, c)
885 fn = lambda s, c, **kwargs: oldfn(s, c)
886 l.append((mf, fn, params))
886 l.append((mf, fn, params))
887 self.filterpats[filter] = l
887 self.filterpats[filter] = l
888 return self.filterpats[filter]
888 return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

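    # Illustrative sketch (not part of the original module): the filter
    # tables above are driven by hgrc sections, e.g. a hypothetical
    #
    #     [encode]
    #     **.dat = mypipe
    #
    # would make _loadfilter('encode') map '**.dat' to a function piping
    # file data through the (hypothetical) 'mypipe' shell command via
    # util.filter(), unless 'mypipe' starts with the name of a data
    # filter registered through adddatafilter() below.
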
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

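    # Illustrative sketch (not part of the original module): the flags are
    # the manifest flags, so a caller might do
    #
    #     repo.wwrite('bin/tool', data, 'x')     # executable file
    #     repo.wwrite('alias', 'target', 'l')    # symlink pointing at 'target'
    #
    # writing either a regular (possibly executable) file or a symlink.
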
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

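    # Illustrative sketch (not part of the original module): the usual
    # calling pattern, mirroring commitctx() below:
    #
    #     tr = repo.transaction('some-operation')
    #     try:
    #         ...            # write to the store
    #         tr.close()     # success: run finalizers and close hooks
    #     finally:
    #         tr.release()   # roll back if close() was never reached
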
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

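    # Illustrative note (not part of the original module): undofiles() maps
    # each journal file through undoname(), so, assuming undoname() keeps
    # its 'journal' -> 'undo' renaming, (self.vfs, 'journal.dirstate')
    # becomes (self.vfs, 'undo.dirstate').
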
    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

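    # Illustrative note (not part of the original module): recover() rolls
    # back an interrupted transaction recorded in 'journal', while
    # rollback() undoes the last completed transaction from the 'undo'
    # files, e.g.
    #
    #     repo.rollback(dryrun=True)   # report what would be undone
    #     repo.rollback(force=True)    # skip the checked-out-commit guard
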
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

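    # Illustrative sketch (not part of the original module): _afterlock()
    # defers work until the outermost lock is dropped, as commit() below
    # does with its commit hook:
    #
    #     def notify():
    #         ...   # runs once both wlock and lock are released
    #     repo._afterlock(notify)
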
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

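    # Illustrative sketch (not part of the original module): callers pair
    # the store lock with try/finally, as recover() above does:
    #
    #     lock = repo.lock()
    #     try:
    #         ...   # modify .hg/store
    #     finally:
    #         lock.release()
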
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a deadlock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

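    # Illustrative sketch (not part of the original module): when both
    # locks are needed, acquire 'wlock' first as the docstrings require,
    # mirroring rollback() above:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...
    #     finally:
    #         release(lock, wlock)
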
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

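    # Illustrative note (not part of the original module): for a rename,
    # the filelog revision added above carries copy metadata roughly like
    #
    #     meta = {'copy': 'oldname', 'copyrev': '<hex of source filenode>'}
    #
    # with fparent1 set to nullid so readers know to look up the copy data.
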
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

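    # Illustrative sketch (not part of the original module): a minimal
    # programmatic commit, assuming the working directory has changes:
    #
    #     node = repo.commit(text='example message', user='someone')
    #     if node is None:
    #         ...   # nothing to commit, no revision was created
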
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

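    # Illustrative note (not part of the original module): commitctx()
    # accepts any context object exposing the commit interface, e.g. the
    # workingcommitctx built by commit() above or an in-memory
    # context.memctx, so history can be created without touching the
    # working directory.
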
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

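    # Illustrative sketch (not part of the original module): with the
    # default arguments, status() compares the working directory against
    # its first parent:
    #
    #     st = repo.status()
    #     for f in st.modified:
    #         ...   # files changed since '.'
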
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

1722 def between(self, pairs):
1722 def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()
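
    # Hypothetical extension sketch: register a pre-push check. The names
    # 'myext', mycheck() and istainted() are illustrative, not part of
    # this module.
    #
    #     def mycheck(repo, remote, outgoing):
    #         if istainted(outgoing.missing):
    #             raise util.Abort('push rejected by myext')
    #     repo.prepushoutgoinghooks.add('myext', mycheck)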

    def stream_in(self, remote, remotereqs):
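        # The stream_out wire format, as parsed below: a status line
        # holding an integer code (0 ok, 1 operation forbidden, 2 remote
        # locking failed), then a '<total_files> <total_bytes>' line, then
        # for each file a '<name>\0<size>' header followed by <size> raw
        # bytes of data.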
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            self.requirements = remotereqs | (
                self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()
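
            # Illustrative example (values assumed): with self.requirements
            # {'revlogv1', 'store', 'fncache'} and remotereqs {'revlogv1',
            # 'generaldelta'}, the non-format entries 'store' and 'fncache'
            # are kept while the format entries are taken from the remote,
            # giving {'revlogv1', 'generaldelta', 'store', 'fncache'}.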

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick the cache on as low a filter level as
                    # possible; filters above 'served' are unlikely to be
                    # fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=None):
        '''clone a remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)
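
        # Illustrative capability value (assumed): a server advertising
        # 'streamreqs=revlogv1,generaldelta' makes remote.capable() return
        # 'revlogv1,generaldelta', so streamreqs becomes {'revlogv1',
        # 'generaldelta'}; streaming is used only when every such format
        # is in self.supportedformats.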

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            pending = lambda: tr.writepending() and self.root or ""
            hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
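
    # Usage sketch (hypothetical values): moving a bookmark through
    # pushkey, where bookmark values are hex nodes and an empty old value
    # indicates the key is being created:
    #
    #     repo.pushkey('bookmarks', 'feature', '', hex(newnode))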

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
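
    # Note on the return value: self.pathto() reports the file relative to
    # the current working directory, so from the repository root this is
    # typically '.hg/last-message.txt'.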

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
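
# A sketch of typical use: the transaction machinery invokes the returned
# closure once the transaction is over, renaming each journal file to its
# undo counterpart (the destination names come from undoname() below).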

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
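
# For instance, undoname('journal.dirstate') yields 'undo.dirstate'.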

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True