localrepo: eliminate requirements class variable (API)...
Drew Gottlieb
r24913:e3a928bd default
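The change is small but API-visible: the mutable class variable requirements = ['revlogv1'] disappears from localrepository, leaving requirements as a purely per-instance attribute (assigned in _applyrequirements), while _baserequirements now builds a fresh list instead of copying the class-level one. A minimal sketch of the pattern being removed, in plain Python (illustrative only; the Before/After class names are hypothetical, not code from this changeset):

    # Illustrative sketch only -- not Mercurial code.
    class Before(object):
        requirements = ['revlogv1']      # one mutable list hanging off the class

        def _baserequirements(self, create):
            return self.requirements[:]  # must remember to copy every time

    class After(object):
        # No class-level default: each instance assigns self.requirements
        # itself (localrepository does this in _applyrequirements).
        def _baserequirements(self, create):
            return ['revlogv1']          # always a fresh, private list

After the change, repo.requirements can only mean one thing: the on-disk format requirements of that particular repository, rather than a class default that instance state may or may not shadow.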
@@ -1,1972 +1,1971 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 'dotencode'))
197 'dotencode'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 requirements = ['revlogv1']
200 filtername = None
199 filtername = None
201
200
202 # a list of (ui, featureset) functions.
201 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
202 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
203 featuresetupfuncs = set()
205
204
206 def _baserequirements(self, create):
205 def _baserequirements(self, create):
207 return self.requirements[:]
206 return ['revlogv1']
208
207
209 def __init__(self, baseui, path=None, create=False):
208 def __init__(self, baseui, path=None, create=False):
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
209 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wopener = self.wvfs
210 self.wopener = self.wvfs
212 self.root = self.wvfs.base
211 self.root = self.wvfs.base
213 self.path = self.wvfs.join(".hg")
212 self.path = self.wvfs.join(".hg")
214 self.origroot = path
213 self.origroot = path
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
214 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.vfs = scmutil.vfs(self.path)
215 self.vfs = scmutil.vfs(self.path)
217 self.opener = self.vfs
216 self.opener = self.vfs
218 self.baseui = baseui
217 self.baseui = baseui
219 self.ui = baseui.copy()
218 self.ui = baseui.copy()
220 self.ui.copy = baseui.copy # prevent copying repo configuration
219 self.ui.copy = baseui.copy # prevent copying repo configuration
221 # A list of callback to shape the phase if no data were found.
220 # A list of callback to shape the phase if no data were found.
222 # Callback are in the form: func(repo, roots) --> processed root.
221 # Callback are in the form: func(repo, roots) --> processed root.
223 # This list it to be filled by extension during repo setup
222 # This list it to be filled by extension during repo setup
224 self._phasedefaults = []
223 self._phasedefaults = []
225 try:
224 try:
226 self.ui.readconfig(self.join("hgrc"), self.root)
225 self.ui.readconfig(self.join("hgrc"), self.root)
227 extensions.loadall(self.ui)
226 extensions.loadall(self.ui)
228 except IOError:
227 except IOError:
229 pass
228 pass
230
229
231 if self.featuresetupfuncs:
230 if self.featuresetupfuncs:
232 self.supported = set(self._basesupported) # use private copy
231 self.supported = set(self._basesupported) # use private copy
233 extmods = set(m.__name__ for n, m
232 extmods = set(m.__name__ for n, m
234 in extensions.extensions(self.ui))
233 in extensions.extensions(self.ui))
235 for setupfunc in self.featuresetupfuncs:
234 for setupfunc in self.featuresetupfuncs:
236 if setupfunc.__module__ in extmods:
235 if setupfunc.__module__ in extmods:
237 setupfunc(self.ui, self.supported)
236 setupfunc(self.ui, self.supported)
238 else:
237 else:
239 self.supported = self._basesupported
238 self.supported = self._basesupported
240
239
241 if not self.vfs.isdir():
240 if not self.vfs.isdir():
242 if create:
241 if create:
243 if not self.wvfs.exists():
242 if not self.wvfs.exists():
244 self.wvfs.makedirs()
243 self.wvfs.makedirs()
245 self.vfs.makedir(notindexed=True)
244 self.vfs.makedir(notindexed=True)
246 requirements = self._baserequirements(create)
245 requirements = self._baserequirements(create)
247 if self.ui.configbool('format', 'usestore', True):
246 if self.ui.configbool('format', 'usestore', True):
248 self.vfs.mkdir("store")
247 self.vfs.mkdir("store")
249 requirements.append("store")
248 requirements.append("store")
250 if self.ui.configbool('format', 'usefncache', True):
249 if self.ui.configbool('format', 'usefncache', True):
251 requirements.append("fncache")
250 requirements.append("fncache")
252 if self.ui.configbool('format', 'dotencode', True):
251 if self.ui.configbool('format', 'dotencode', True):
253 requirements.append('dotencode')
252 requirements.append('dotencode')
254 # create an invalid changelog
253 # create an invalid changelog
255 self.vfs.append(
254 self.vfs.append(
256 "00changelog.i",
255 "00changelog.i",
257 '\0\0\0\2' # represents revlogv2
256 '\0\0\0\2' # represents revlogv2
258 ' dummy changelog to prevent using the old repo layout'
257 ' dummy changelog to prevent using the old repo layout'
259 )
258 )
260 if self.ui.configbool('format', 'generaldelta', False):
259 if self.ui.configbool('format', 'generaldelta', False):
261 requirements.append("generaldelta")
260 requirements.append("generaldelta")
262 if self.ui.configbool('experimental', 'manifestv2', False):
261 if self.ui.configbool('experimental', 'manifestv2', False):
263 requirements.append("manifestv2")
262 requirements.append("manifestv2")
264 requirements = set(requirements)
263 requirements = set(requirements)
265 else:
264 else:
266 raise error.RepoError(_("repository %s not found") % path)
265 raise error.RepoError(_("repository %s not found") % path)
267 elif create:
266 elif create:
268 raise error.RepoError(_("repository %s already exists") % path)
267 raise error.RepoError(_("repository %s already exists") % path)
269 else:
268 else:
270 try:
269 try:
271 requirements = scmutil.readrequires(self.vfs, self.supported)
270 requirements = scmutil.readrequires(self.vfs, self.supported)
272 except IOError, inst:
271 except IOError, inst:
273 if inst.errno != errno.ENOENT:
272 if inst.errno != errno.ENOENT:
274 raise
273 raise
275 requirements = set()
274 requirements = set()
276
275
277 self.sharedpath = self.path
276 self.sharedpath = self.path
278 try:
277 try:
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
278 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
280 realpath=True)
279 realpath=True)
281 s = vfs.base
280 s = vfs.base
282 if not vfs.exists():
281 if not vfs.exists():
283 raise error.RepoError(
282 raise error.RepoError(
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
283 _('.hg/sharedpath points to nonexistent directory %s') % s)
285 self.sharedpath = s
284 self.sharedpath = s
286 except IOError, inst:
285 except IOError, inst:
287 if inst.errno != errno.ENOENT:
286 if inst.errno != errno.ENOENT:
288 raise
287 raise
289
288
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
289 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
291 self.spath = self.store.path
290 self.spath = self.store.path
292 self.svfs = self.store.vfs
291 self.svfs = self.store.vfs
293 self.sopener = self.svfs
292 self.sopener = self.svfs
294 self.sjoin = self.store.join
293 self.sjoin = self.store.join
295 self.vfs.createmode = self.store.createmode
294 self.vfs.createmode = self.store.createmode
296 self._applyrequirements(requirements)
295 self._applyrequirements(requirements)
297 if create:
296 if create:
298 self._writerequirements()
297 self._writerequirements()
299
298
300
299
301 self._branchcaches = {}
300 self._branchcaches = {}
302 self._revbranchcache = None
301 self._revbranchcache = None
303 self.filterpats = {}
302 self.filterpats = {}
304 self._datafilters = {}
303 self._datafilters = {}
305 self._transref = self._lockref = self._wlockref = None
304 self._transref = self._lockref = self._wlockref = None
306
305
307 # A cache for various files under .hg/ that tracks file changes,
306 # A cache for various files under .hg/ that tracks file changes,
308 # (used by the filecache decorator)
307 # (used by the filecache decorator)
309 #
308 #
310 # Maps a property name to its util.filecacheentry
309 # Maps a property name to its util.filecacheentry
311 self._filecache = {}
310 self._filecache = {}
312
311
313 # hold sets of revision to be filtered
312 # hold sets of revision to be filtered
314 # should be cleared when something might have changed the filter value:
313 # should be cleared when something might have changed the filter value:
315 # - new changesets,
314 # - new changesets,
316 # - phase change,
315 # - phase change,
317 # - new obsolescence marker,
316 # - new obsolescence marker,
318 # - working directory parent change,
317 # - working directory parent change,
319 # - bookmark changes
318 # - bookmark changes
320 self.filteredrevcache = {}
319 self.filteredrevcache = {}
321
320
322 # generic mapping between names and nodes
321 # generic mapping between names and nodes
323 self.names = namespaces.namespaces()
322 self.names = namespaces.namespaces()
324
323
325 def close(self):
324 def close(self):
326 self._writecaches()
325 self._writecaches()
327
326
328 def _writecaches(self):
327 def _writecaches(self):
329 if self._revbranchcache:
328 if self._revbranchcache:
330 self._revbranchcache.write()
329 self._revbranchcache.write()
331
330
332 def _restrictcapabilities(self, caps):
331 def _restrictcapabilities(self, caps):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
332 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 caps = set(caps)
333 caps = set(caps)
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
334 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 caps.add('bundle2=' + urllib.quote(capsblob))
335 caps.add('bundle2=' + urllib.quote(capsblob))
337 return caps
336 return caps
338
337
339 def _applyrequirements(self, requirements):
338 def _applyrequirements(self, requirements):
340 self.requirements = requirements
339 self.requirements = requirements
341 self.svfs.options = dict((r, 1) for r in requirements
340 self.svfs.options = dict((r, 1) for r in requirements
342 if r in self.openerreqs)
341 if r in self.openerreqs)
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
342 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 if chunkcachesize is not None:
343 if chunkcachesize is not None:
345 self.svfs.options['chunkcachesize'] = chunkcachesize
344 self.svfs.options['chunkcachesize'] = chunkcachesize
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
345 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 if maxchainlen is not None:
346 if maxchainlen is not None:
348 self.svfs.options['maxchainlen'] = maxchainlen
347 self.svfs.options['maxchainlen'] = maxchainlen
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
348 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 if manifestcachesize is not None:
349 if manifestcachesize is not None:
351 self.svfs.options['manifestcachesize'] = manifestcachesize
350 self.svfs.options['manifestcachesize'] = manifestcachesize
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
351 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
353 if usetreemanifest is not None:
352 if usetreemanifest is not None:
354 self.svfs.options['usetreemanifest'] = usetreemanifest
353 self.svfs.options['usetreemanifest'] = usetreemanifest
355
354
356 def _writerequirements(self):
355 def _writerequirements(self):
357 reqfile = self.vfs("requires", "w")
356 reqfile = self.vfs("requires", "w")
358 for r in sorted(self.requirements):
357 for r in sorted(self.requirements):
359 reqfile.write("%s\n" % r)
358 reqfile.write("%s\n" % r)
360 reqfile.close()
359 reqfile.close()
361
360
362 def _checknested(self, path):
361 def _checknested(self, path):
363 """Determine if path is a legal nested repository."""
362 """Determine if path is a legal nested repository."""
364 if not path.startswith(self.root):
363 if not path.startswith(self.root):
365 return False
364 return False
366 subpath = path[len(self.root) + 1:]
365 subpath = path[len(self.root) + 1:]
367 normsubpath = util.pconvert(subpath)
366 normsubpath = util.pconvert(subpath)
368
367
369 # XXX: Checking against the current working copy is wrong in
368 # XXX: Checking against the current working copy is wrong in
370 # the sense that it can reject things like
369 # the sense that it can reject things like
371 #
370 #
372 # $ hg cat -r 10 sub/x.txt
371 # $ hg cat -r 10 sub/x.txt
373 #
372 #
374 # if sub/ is no longer a subrepository in the working copy
373 # if sub/ is no longer a subrepository in the working copy
375 # parent revision.
374 # parent revision.
376 #
375 #
377 # However, it can of course also allow things that would have
376 # However, it can of course also allow things that would have
378 # been rejected before, such as the above cat command if sub/
377 # been rejected before, such as the above cat command if sub/
379 # is a subrepository now, but was a normal directory before.
378 # is a subrepository now, but was a normal directory before.
380 # The old path auditor would have rejected by mistake since it
379 # The old path auditor would have rejected by mistake since it
381 # panics when it sees sub/.hg/.
380 # panics when it sees sub/.hg/.
382 #
381 #
383 # All in all, checking against the working copy seems sensible
382 # All in all, checking against the working copy seems sensible
384 # since we want to prevent access to nested repositories on
383 # since we want to prevent access to nested repositories on
385 # the filesystem *now*.
384 # the filesystem *now*.
386 ctx = self[None]
385 ctx = self[None]
387 parts = util.splitpath(subpath)
386 parts = util.splitpath(subpath)
388 while parts:
387 while parts:
389 prefix = '/'.join(parts)
388 prefix = '/'.join(parts)
390 if prefix in ctx.substate:
389 if prefix in ctx.substate:
391 if prefix == normsubpath:
390 if prefix == normsubpath:
392 return True
391 return True
393 else:
392 else:
394 sub = ctx.sub(prefix)
393 sub = ctx.sub(prefix)
395 return sub.checknested(subpath[len(prefix) + 1:])
394 return sub.checknested(subpath[len(prefix) + 1:])
396 else:
395 else:
397 parts.pop()
396 parts.pop()
398 return False
397 return False
399
398
400 def peer(self):
399 def peer(self):
401 return localpeer(self) # not cached to avoid reference cycle
400 return localpeer(self) # not cached to avoid reference cycle
402
401
403 def unfiltered(self):
402 def unfiltered(self):
404 """Return unfiltered version of the repository
403 """Return unfiltered version of the repository
405
404
406 Intended to be overwritten by filtered repo."""
405 Intended to be overwritten by filtered repo."""
407 return self
406 return self
408
407
409 def filtered(self, name):
408 def filtered(self, name):
410 """Return a filtered version of a repository"""
409 """Return a filtered version of a repository"""
411 # build a new class with the mixin and the current class
410 # build a new class with the mixin and the current class
412 # (possibly subclass of the repo)
411 # (possibly subclass of the repo)
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
412 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 pass
413 pass
415 return proxycls(self, name)
414 return proxycls(self, name)
416
415
417 @repofilecache('bookmarks')
416 @repofilecache('bookmarks')
418 def _bookmarks(self):
417 def _bookmarks(self):
419 return bookmarks.bmstore(self)
418 return bookmarks.bmstore(self)
420
419
421 @repofilecache('bookmarks.current')
420 @repofilecache('bookmarks.current')
422 def _bookmarkcurrent(self):
421 def _bookmarkcurrent(self):
423 return bookmarks.readcurrent(self)
422 return bookmarks.readcurrent(self)
424
423
425 def bookmarkheads(self, bookmark):
424 def bookmarkheads(self, bookmark):
426 name = bookmark.split('@', 1)[0]
425 name = bookmark.split('@', 1)[0]
427 heads = []
426 heads = []
428 for mark, n in self._bookmarks.iteritems():
427 for mark, n in self._bookmarks.iteritems():
429 if mark.split('@', 1)[0] == name:
428 if mark.split('@', 1)[0] == name:
430 heads.append(n)
429 heads.append(n)
431 return heads
430 return heads
432
431
433 @storecache('phaseroots')
432 @storecache('phaseroots')
434 def _phasecache(self):
433 def _phasecache(self):
435 return phases.phasecache(self, self._phasedefaults)
434 return phases.phasecache(self, self._phasedefaults)
436
435
437 @storecache('obsstore')
436 @storecache('obsstore')
438 def obsstore(self):
437 def obsstore(self):
439 # read default format for new obsstore.
438 # read default format for new obsstore.
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
439 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 # rely on obsstore class default when possible.
440 # rely on obsstore class default when possible.
442 kwargs = {}
441 kwargs = {}
443 if defaultformat is not None:
442 if defaultformat is not None:
444 kwargs['defaultformat'] = defaultformat
443 kwargs['defaultformat'] = defaultformat
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
444 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
445 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 **kwargs)
446 **kwargs)
448 if store and readonly:
447 if store and readonly:
449 self.ui.warn(
448 self.ui.warn(
450 _('obsolete feature not enabled but %i markers found!\n')
449 _('obsolete feature not enabled but %i markers found!\n')
451 % len(list(store)))
450 % len(list(store)))
452 return store
451 return store
453
452
454 @storecache('00changelog.i')
453 @storecache('00changelog.i')
455 def changelog(self):
454 def changelog(self):
456 c = changelog.changelog(self.svfs)
455 c = changelog.changelog(self.svfs)
457 if 'HG_PENDING' in os.environ:
456 if 'HG_PENDING' in os.environ:
458 p = os.environ['HG_PENDING']
457 p = os.environ['HG_PENDING']
459 if p.startswith(self.root):
458 if p.startswith(self.root):
460 c.readpending('00changelog.i.a')
459 c.readpending('00changelog.i.a')
461 return c
460 return c
462
461
463 @storecache('00manifest.i')
462 @storecache('00manifest.i')
464 def manifest(self):
463 def manifest(self):
465 return manifest.manifest(self.svfs)
464 return manifest.manifest(self.svfs)
466
465
467 @repofilecache('dirstate')
466 @repofilecache('dirstate')
468 def dirstate(self):
467 def dirstate(self):
469 warned = [0]
468 warned = [0]
470 def validate(node):
469 def validate(node):
471 try:
470 try:
472 self.changelog.rev(node)
471 self.changelog.rev(node)
473 return node
472 return node
474 except error.LookupError:
473 except error.LookupError:
475 if not warned[0]:
474 if not warned[0]:
476 warned[0] = True
475 warned[0] = True
477 self.ui.warn(_("warning: ignoring unknown"
476 self.ui.warn(_("warning: ignoring unknown"
478 " working parent %s!\n") % short(node))
477 " working parent %s!\n") % short(node))
479 return nullid
478 return nullid
480
479
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482
481
483 def __getitem__(self, changeid):
482 def __getitem__(self, changeid):
484 if changeid is None:
483 if changeid is None:
485 return context.workingctx(self)
484 return context.workingctx(self)
486 if isinstance(changeid, slice):
485 if isinstance(changeid, slice):
487 return [context.changectx(self, i)
486 return [context.changectx(self, i)
488 for i in xrange(*changeid.indices(len(self)))
487 for i in xrange(*changeid.indices(len(self)))
489 if i not in self.changelog.filteredrevs]
488 if i not in self.changelog.filteredrevs]
490 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
491
490
492 def __contains__(self, changeid):
491 def __contains__(self, changeid):
493 try:
492 try:
494 self[changeid]
493 self[changeid]
495 return True
494 return True
496 except error.RepoLookupError:
495 except error.RepoLookupError:
497 return False
496 return False
498
497
499 def __nonzero__(self):
498 def __nonzero__(self):
500 return True
499 return True
501
500
502 def __len__(self):
501 def __len__(self):
503 return len(self.changelog)
502 return len(self.changelog)
504
503
505 def __iter__(self):
504 def __iter__(self):
506 return iter(self.changelog)
505 return iter(self.changelog)
507
506
508 def revs(self, expr, *args):
507 def revs(self, expr, *args):
509 '''Return a list of revisions matching the given revset'''
508 '''Return a list of revisions matching the given revset'''
510 expr = revset.formatspec(expr, *args)
509 expr = revset.formatspec(expr, *args)
511 m = revset.match(None, expr)
510 m = revset.match(None, expr)
512 return m(self)
511 return m(self)
513
512
514 def set(self, expr, *args):
513 def set(self, expr, *args):
515 '''
514 '''
516 Yield a context for each matching revision, after doing arg
515 Yield a context for each matching revision, after doing arg
517 replacement via revset.formatspec
516 replacement via revset.formatspec
518 '''
517 '''
519 for r in self.revs(expr, *args):
518 for r in self.revs(expr, *args):
520 yield self[r]
519 yield self[r]
521
520
522 def url(self):
521 def url(self):
523 return 'file:' + self.root
522 return 'file:' + self.root
524
523
525 def hook(self, name, throw=False, **args):
524 def hook(self, name, throw=False, **args):
526 """Call a hook, passing this repo instance.
525 """Call a hook, passing this repo instance.
527
526
528 This a convenience method to aid invoking hooks. Extensions likely
527 This a convenience method to aid invoking hooks. Extensions likely
529 won't call this unless they have registered a custom hook or are
528 won't call this unless they have registered a custom hook or are
530 replacing code that is expected to call a hook.
529 replacing code that is expected to call a hook.
531 """
530 """
532 return hook.hook(self.ui, self, name, throw, **args)
531 return hook.hook(self.ui, self, name, throw, **args)
533
532
534 @unfilteredmethod
533 @unfilteredmethod
535 def _tag(self, names, node, message, local, user, date, extra={},
534 def _tag(self, names, node, message, local, user, date, extra={},
536 editor=False):
535 editor=False):
537 if isinstance(names, str):
536 if isinstance(names, str):
538 names = (names,)
537 names = (names,)
539
538
540 branches = self.branchmap()
539 branches = self.branchmap()
541 for name in names:
540 for name in names:
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 local=local)
542 local=local)
544 if name in branches:
543 if name in branches:
545 self.ui.warn(_("warning: tag %s conflicts with existing"
544 self.ui.warn(_("warning: tag %s conflicts with existing"
546 " branch name\n") % name)
545 " branch name\n") % name)
547
546
548 def writetags(fp, names, munge, prevtags):
547 def writetags(fp, names, munge, prevtags):
549 fp.seek(0, 2)
548 fp.seek(0, 2)
550 if prevtags and prevtags[-1] != '\n':
549 if prevtags and prevtags[-1] != '\n':
551 fp.write('\n')
550 fp.write('\n')
552 for name in names:
551 for name in names:
553 if munge:
552 if munge:
554 m = munge(name)
553 m = munge(name)
555 else:
554 else:
556 m = name
555 m = name
557
556
558 if (self._tagscache.tagtypes and
557 if (self._tagscache.tagtypes and
559 name in self._tagscache.tagtypes):
558 name in self._tagscache.tagtypes):
560 old = self.tags().get(name, nullid)
559 old = self.tags().get(name, nullid)
561 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(old), m))
562 fp.write('%s %s\n' % (hex(node), m))
561 fp.write('%s %s\n' % (hex(node), m))
563 fp.close()
562 fp.close()
564
563
565 prevtags = ''
564 prevtags = ''
566 if local:
565 if local:
567 try:
566 try:
568 fp = self.vfs('localtags', 'r+')
567 fp = self.vfs('localtags', 'r+')
569 except IOError:
568 except IOError:
570 fp = self.vfs('localtags', 'a')
569 fp = self.vfs('localtags', 'a')
571 else:
570 else:
572 prevtags = fp.read()
571 prevtags = fp.read()
573
572
574 # local tags are stored in the current charset
573 # local tags are stored in the current charset
575 writetags(fp, names, None, prevtags)
574 writetags(fp, names, None, prevtags)
576 for name in names:
575 for name in names:
577 self.hook('tag', node=hex(node), tag=name, local=local)
576 self.hook('tag', node=hex(node), tag=name, local=local)
578 return
577 return
579
578
580 try:
579 try:
581 fp = self.wfile('.hgtags', 'rb+')
580 fp = self.wfile('.hgtags', 'rb+')
582 except IOError, e:
581 except IOError, e:
583 if e.errno != errno.ENOENT:
582 if e.errno != errno.ENOENT:
584 raise
583 raise
585 fp = self.wfile('.hgtags', 'ab')
584 fp = self.wfile('.hgtags', 'ab')
586 else:
585 else:
587 prevtags = fp.read()
586 prevtags = fp.read()
588
587
589 # committed tags are stored in UTF-8
588 # committed tags are stored in UTF-8
590 writetags(fp, names, encoding.fromlocal, prevtags)
589 writetags(fp, names, encoding.fromlocal, prevtags)
591
590
592 fp.close()
591 fp.close()
593
592
594 self.invalidatecaches()
593 self.invalidatecaches()
595
594
596 if '.hgtags' not in self.dirstate:
595 if '.hgtags' not in self.dirstate:
597 self[None].add(['.hgtags'])
596 self[None].add(['.hgtags'])
598
597
599 m = matchmod.exact(self.root, '', ['.hgtags'])
598 m = matchmod.exact(self.root, '', ['.hgtags'])
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 editor=editor)
600 editor=editor)
602
601
603 for name in names:
602 for name in names:
604 self.hook('tag', node=hex(node), tag=name, local=local)
603 self.hook('tag', node=hex(node), tag=name, local=local)
605
604
606 return tagnode
605 return tagnode
607
606
608 def tag(self, names, node, message, local, user, date, editor=False):
607 def tag(self, names, node, message, local, user, date, editor=False):
609 '''tag a revision with one or more symbolic names.
608 '''tag a revision with one or more symbolic names.
610
609
611 names is a list of strings or, when adding a single tag, names may be a
610 names is a list of strings or, when adding a single tag, names may be a
612 string.
611 string.
613
612
614 if local is True, the tags are stored in a per-repository file.
613 if local is True, the tags are stored in a per-repository file.
615 otherwise, they are stored in the .hgtags file, and a new
614 otherwise, they are stored in the .hgtags file, and a new
616 changeset is committed with the change.
615 changeset is committed with the change.
617
616
618 keyword arguments:
617 keyword arguments:
619
618
620 local: whether to store tags in non-version-controlled file
619 local: whether to store tags in non-version-controlled file
621 (default False)
620 (default False)
622
621
623 message: commit message to use if committing
622 message: commit message to use if committing
624
623
625 user: name of user to use if committing
624 user: name of user to use if committing
626
625
627 date: date tuple to use if committing'''
626 date: date tuple to use if committing'''
628
627
629 if not local:
628 if not local:
630 m = matchmod.exact(self.root, '', ['.hgtags'])
629 m = matchmod.exact(self.root, '', ['.hgtags'])
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
630 if util.any(self.status(match=m, unknown=True, ignored=True)):
632 raise util.Abort(_('working copy of .hgtags is changed'),
631 raise util.Abort(_('working copy of .hgtags is changed'),
633 hint=_('please commit .hgtags manually'))
632 hint=_('please commit .hgtags manually'))
634
633
635 self.tags() # instantiate the cache
634 self.tags() # instantiate the cache
636 self._tag(names, node, message, local, user, date, editor=editor)
635 self._tag(names, node, message, local, user, date, editor=editor)
637
636
638 @filteredpropertycache
637 @filteredpropertycache
639 def _tagscache(self):
638 def _tagscache(self):
640 '''Returns a tagscache object that contains various tags related
639 '''Returns a tagscache object that contains various tags related
641 caches.'''
640 caches.'''
642
641
643 # This simplifies its cache management by having one decorated
642 # This simplifies its cache management by having one decorated
644 # function (this one) and the rest simply fetch things from it.
643 # function (this one) and the rest simply fetch things from it.
645 class tagscache(object):
644 class tagscache(object):
646 def __init__(self):
645 def __init__(self):
647 # These two define the set of tags for this repository. tags
646 # These two define the set of tags for this repository. tags
648 # maps tag name to node; tagtypes maps tag name to 'global' or
647 # maps tag name to node; tagtypes maps tag name to 'global' or
649 # 'local'. (Global tags are defined by .hgtags across all
648 # 'local'. (Global tags are defined by .hgtags across all
650 # heads, and local tags are defined in .hg/localtags.)
649 # heads, and local tags are defined in .hg/localtags.)
651 # They constitute the in-memory cache of tags.
650 # They constitute the in-memory cache of tags.
652 self.tags = self.tagtypes = None
651 self.tags = self.tagtypes = None
653
652
654 self.nodetagscache = self.tagslist = None
653 self.nodetagscache = self.tagslist = None
655
654
656 cache = tagscache()
655 cache = tagscache()
657 cache.tags, cache.tagtypes = self._findtags()
656 cache.tags, cache.tagtypes = self._findtags()
658
657
659 return cache
658 return cache
660
659
661 def tags(self):
660 def tags(self):
662 '''return a mapping of tag to node'''
661 '''return a mapping of tag to node'''
663 t = {}
662 t = {}
664 if self.changelog.filteredrevs:
663 if self.changelog.filteredrevs:
665 tags, tt = self._findtags()
664 tags, tt = self._findtags()
666 else:
665 else:
667 tags = self._tagscache.tags
666 tags = self._tagscache.tags
668 for k, v in tags.iteritems():
667 for k, v in tags.iteritems():
669 try:
668 try:
670 # ignore tags to unknown nodes
669 # ignore tags to unknown nodes
671 self.changelog.rev(v)
670 self.changelog.rev(v)
672 t[k] = v
671 t[k] = v
673 except (error.LookupError, ValueError):
672 except (error.LookupError, ValueError):
674 pass
673 pass
675 return t
674 return t
676
675
677 def _findtags(self):
676 def _findtags(self):
678 '''Do the hard work of finding tags. Return a pair of dicts
677 '''Do the hard work of finding tags. Return a pair of dicts
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
680 maps tag name to a string like \'global\' or \'local\'.
679 maps tag name to a string like \'global\' or \'local\'.
681 Subclasses or extensions are free to add their own tags, but
680 Subclasses or extensions are free to add their own tags, but
682 should be aware that the returned dicts will be retained for the
681 should be aware that the returned dicts will be retained for the
683 duration of the localrepo object.'''
682 duration of the localrepo object.'''
684
683
685 # XXX what tagtype should subclasses/extensions use? Currently
684 # XXX what tagtype should subclasses/extensions use? Currently
686 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # mq and bookmarks add tags, but do not set the tagtype at all.
687 # Should each extension invent its own tag type? Should there
686 # Should each extension invent its own tag type? Should there
688 # be one tagtype for all such "virtual" tags? Or is the status
687 # be one tagtype for all such "virtual" tags? Or is the status
689 # quo fine?
688 # quo fine?
690
689
691 alltags = {} # map tag name to (node, hist)
690 alltags = {} # map tag name to (node, hist)
692 tagtypes = {}
691 tagtypes = {}
693
692
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
696
695
697 # Build the return dicts. Have to re-encode tag names because
696 # Build the return dicts. Have to re-encode tag names because
698 # the tags module always uses UTF-8 (in order not to lose info
697 # the tags module always uses UTF-8 (in order not to lose info
699 # writing to the cache), but the rest of Mercurial wants them in
698 # writing to the cache), but the rest of Mercurial wants them in
700 # local encoding.
699 # local encoding.
701 tags = {}
700 tags = {}
702 for (name, (node, hist)) in alltags.iteritems():
701 for (name, (node, hist)) in alltags.iteritems():
703 if node != nullid:
702 if node != nullid:
704 tags[encoding.tolocal(name)] = node
703 tags[encoding.tolocal(name)] = node
705 tags['tip'] = self.changelog.tip()
704 tags['tip'] = self.changelog.tip()
706 tagtypes = dict([(encoding.tolocal(name), value)
705 tagtypes = dict([(encoding.tolocal(name), value)
707 for (name, value) in tagtypes.iteritems()])
706 for (name, value) in tagtypes.iteritems()])
708 return (tags, tagtypes)
707 return (tags, tagtypes)
709
708
710 def tagtype(self, tagname):
709 def tagtype(self, tagname):
711 '''
710 '''
712 return the type of the given tag. result can be:
711 return the type of the given tag. result can be:
713
712
714 'local' : a local tag
713 'local' : a local tag
715 'global' : a global tag
714 'global' : a global tag
716 None : tag does not exist
715 None : tag does not exist
717 '''
716 '''
718
717
719 return self._tagscache.tagtypes.get(tagname)
718 return self._tagscache.tagtypes.get(tagname)
720
719
721 def tagslist(self):
720 def tagslist(self):
722 '''return a list of tags ordered by revision'''
721 '''return a list of tags ordered by revision'''
723 if not self._tagscache.tagslist:
722 if not self._tagscache.tagslist:
724 l = []
723 l = []
725 for t, n in self.tags().iteritems():
724 for t, n in self.tags().iteritems():
726 l.append((self.changelog.rev(n), t, n))
725 l.append((self.changelog.rev(n), t, n))
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728
727
729 return self._tagscache.tagslist
728 return self._tagscache.tagslist
730
729
731 def nodetags(self, node):
730 def nodetags(self, node):
732 '''return the tags associated with a node'''
731 '''return the tags associated with a node'''
733 if not self._tagscache.nodetagscache:
732 if not self._tagscache.nodetagscache:
734 nodetagscache = {}
733 nodetagscache = {}
735 for t, n in self._tagscache.tags.iteritems():
734 for t, n in self._tagscache.tags.iteritems():
736 nodetagscache.setdefault(n, []).append(t)
735 nodetagscache.setdefault(n, []).append(t)
737 for tags in nodetagscache.itervalues():
736 for tags in nodetagscache.itervalues():
738 tags.sort()
737 tags.sort()
739 self._tagscache.nodetagscache = nodetagscache
738 self._tagscache.nodetagscache = nodetagscache
740 return self._tagscache.nodetagscache.get(node, [])
739 return self._tagscache.nodetagscache.get(node, [])
741
740
742 def nodebookmarks(self, node):
741 def nodebookmarks(self, node):
743 marks = []
742 marks = []
744 for bookmark, n in self._bookmarks.iteritems():
743 for bookmark, n in self._bookmarks.iteritems():
745 if n == node:
744 if n == node:
746 marks.append(bookmark)
745 marks.append(bookmark)
747 return sorted(marks)
746 return sorted(marks)
748
747
749 def branchmap(self):
748 def branchmap(self):
750 '''returns a dictionary {branch: [branchheads]} with branchheads
749 '''returns a dictionary {branch: [branchheads]} with branchheads
751 ordered by increasing revision number'''
750 ordered by increasing revision number'''
752 branchmap.updatecache(self)
751 branchmap.updatecache(self)
753 return self._branchcaches[self.filtername]
752 return self._branchcaches[self.filtername]
754
753
755 @unfilteredmethod
754 @unfilteredmethod
756 def revbranchcache(self):
755 def revbranchcache(self):
757 if not self._revbranchcache:
756 if not self._revbranchcache:
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 return self._revbranchcache
758 return self._revbranchcache
760
759
761 def branchtip(self, branch, ignoremissing=False):
760 def branchtip(self, branch, ignoremissing=False):
762 '''return the tip node for a given branch
761 '''return the tip node for a given branch
763
762
764 If ignoremissing is True, then this method will not raise an error.
763 If ignoremissing is True, then this method will not raise an error.
765 This is helpful for callers that only expect None for a missing branch
764 This is helpful for callers that only expect None for a missing branch
766 (e.g. namespace).
765 (e.g. namespace).
767
766
768 '''
767 '''
769 try:
768 try:
770 return self.branchmap().branchtip(branch)
769 return self.branchmap().branchtip(branch)
771 except KeyError:
770 except KeyError:
772 if not ignoremissing:
771 if not ignoremissing:
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 else:
773 else:
775 pass
774 pass
776
775
777 def lookup(self, key):
776 def lookup(self, key):
778 return self[key].node()
777 return self[key].node()
779
778
780 def lookupbranch(self, key, remote=None):
779 def lookupbranch(self, key, remote=None):
781 repo = remote or self
780 repo = remote or self
782 if key in repo.branchmap():
781 if key in repo.branchmap():
783 return key
782 return key
784
783
785 repo = (remote and remote.local()) and remote or self
784 repo = (remote and remote.local()) and remote or self
786 return repo[key].branch()
785 return repo[key].branch()
787
786
788 def known(self, nodes):
787 def known(self, nodes):
789 nm = self.changelog.nodemap
788 nm = self.changelog.nodemap
790 pc = self._phasecache
789 pc = self._phasecache
791 result = []
790 result = []
792 for n in nodes:
791 for n in nodes:
793 r = nm.get(n)
792 r = nm.get(n)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 result.append(resp)
794 result.append(resp)
796 return result
795 return result
797
796
798 def local(self):
797 def local(self):
799 return self
798 return self
800
799
801 def cancopy(self):
800 def cancopy(self):
802 # so statichttprepo's override of local() works
801 # so statichttprepo's override of local() works
803 if not self.local():
802 if not self.local():
804 return False
803 return False
805 if not self.ui.configbool('phases', 'publish', True):
804 if not self.ui.configbool('phases', 'publish', True):
806 return True
805 return True
807 # if publishing we can't copy if there is filtered content
806 # if publishing we can't copy if there is filtered content
808 return not self.filtered('visible').changelog.filteredrevs
807 return not self.filtered('visible').changelog.filteredrevs
809
808
810 def shared(self):
809 def shared(self):
811 '''the type of shared repository (None if not shared)'''
810 '''the type of shared repository (None if not shared)'''
812 if self.sharedpath != self.path:
811 if self.sharedpath != self.path:
813 return 'store'
812 return 'store'
814 return None
813 return None
815
814
816 def join(self, f, *insidef):
815 def join(self, f, *insidef):
817 return self.vfs.join(os.path.join(f, *insidef))
816 return self.vfs.join(os.path.join(f, *insidef))
818
817
819 def wjoin(self, f, *insidef):
818 def wjoin(self, f, *insidef):
820 return self.vfs.reljoin(self.root, f, *insidef)
819 return self.vfs.reljoin(self.root, f, *insidef)
821
820
822 def file(self, f):
821 def file(self, f):
823 if f[0] == '/':
822 if f[0] == '/':
824 f = f[1:]
823 f = f[1:]
825 return filelog.filelog(self.svfs, f)
824 return filelog.filelog(self.svfs, f)
826
825
827 def changectx(self, changeid):
826 def changectx(self, changeid):
828 return self[changeid]
827 return self[changeid]
829
828
830 def parents(self, changeid=None):
829 def parents(self, changeid=None):
831 '''get list of changectxs for parents of changeid'''
830 '''get list of changectxs for parents of changeid'''
832 return self[changeid].parents()
831 return self[changeid].parents()
833
832
834 def setparents(self, p1, p2=nullid):
833 def setparents(self, p1, p2=nullid):
835 self.dirstate.beginparentchange()
834 self.dirstate.beginparentchange()
836 copies = self.dirstate.setparents(p1, p2)
835 copies = self.dirstate.setparents(p1, p2)
837 pctx = self[p1]
836 pctx = self[p1]
838 if copies:
837 if copies:
839 # Adjust copy records, the dirstate cannot do it, it
838 # Adjust copy records, the dirstate cannot do it, it
840 # requires access to parents manifests. Preserve them
839 # requires access to parents manifests. Preserve them
841 # only for entries added to first parent.
840 # only for entries added to first parent.
842 for f in copies:
841 for f in copies:
843 if f not in pctx and copies[f] in pctx:
842 if f not in pctx and copies[f] in pctx:
844 self.dirstate.copy(copies[f], f)
843 self.dirstate.copy(copies[f], f)
845 if p2 == nullid:
844 if p2 == nullid:
846 for f, s in sorted(self.dirstate.copies().items()):
845 for f, s in sorted(self.dirstate.copies().items()):
847 if f not in pctx and s not in pctx:
846 if f not in pctx and s not in pctx:
848 self.dirstate.copy(None, f)
847 self.dirstate.copy(None, f)
849 self.dirstate.endparentchange()
848 self.dirstate.endparentchange()
850
849
851 def filectx(self, path, changeid=None, fileid=None):
850 def filectx(self, path, changeid=None, fileid=None):
852 """changeid can be a changeset revision, node, or tag.
851 """changeid can be a changeset revision, node, or tag.
853 fileid can be a file revision or node."""
852 fileid can be a file revision or node."""
854 return context.filectx(self, path, changeid, fileid)
853 return context.filectx(self, path, changeid, fileid)
855
854
856 def getcwd(self):
855 def getcwd(self):
857 return self.dirstate.getcwd()
856 return self.dirstate.getcwd()
858
857
859 def pathto(self, f, cwd=None):
858 def pathto(self, f, cwd=None):
860 return self.dirstate.pathto(f, cwd)
859 return self.dirstate.pathto(f, cwd)
861
860
862 def wfile(self, f, mode='r'):
861 def wfile(self, f, mode='r'):
863 return self.wvfs(f, mode)
862 return self.wvfs(f, mode)
864
863
865 def _link(self, f):
864 def _link(self, f):
866 return self.wvfs.islink(f)
865 return self.wvfs.islink(f)
867
866
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')
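
    # Illustrative sketch (not part of this module): the filter tables
    # above are driven by hgrc sections; the commands below are
    # hypothetical. Given a configuration such as
    #
    #   [encode]
    #   *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #   [decode]
    #   *.txt = dos2unix
    #
    # wread() runs matching files through the [encode] pipeline when
    # reading from the working directory, and wwrite() applies the
    # [decode] pipeline before writing back out.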

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
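
    # A minimal usage sketch (hypothetical file names and contents),
    # assuming `repo` is an open localrepository: the flags string uses
    # the manifest flag characters, 'x' for executable and 'l' for
    # symlink; an empty string writes a plain regular file.
    #
    #   repo.wwrite('build.sh', '#!/bin/sh\n', 'x')   # regular file, +x bit
    #   repo.wwrite('link', 'target/path', 'l')       # symlink to target/path
    #   repo.wwrite('README', 'hello\n', '')          # plain file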

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr
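
    # Illustrative sketch of the expected calling convention (not a new
    # API): callers open a transaction under the store lock, close() it
    # on success, and always release() it so an unclosed transaction is
    # rolled back. 'my-operation' is a hypothetical description.
    #
    #   lock = repo.lock()
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... write to the store ...
    #       tr.close()      # commit the transaction
    #   finally:
    #       tr.release()    # no-op if closed, rollback otherwise
    #       lock.release()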

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
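
    # Sketch of the journal/undo relationship, for orientation: undoname()
    # maps each 'journal*' file to an 'undo*' counterpart, and aftertrans()
    # performs the renames once a transaction succeeds, roughly
    #
    #   .hg/store/journal      -> .hg/store/undo
    #   .hg/journal.dirstate   -> .hg/undo.dirstate
    #   .hg/journal.branch     -> .hg/undo.branch
    #
    # recover() replays an abandoned 'journal'; rollback() consumes 'undo'.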

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
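
    # Illustrative sketch: deferring work until the repository is fully
    # unlocked, assuming `repo` currently holds a lock. The hypothetical
    # callback fires when the outermost lock is released, or immediately
    # if no lock is held at all:
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)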

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not deadlock; it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
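
    # Correct lock ordering, sketched (illustrative only): take 'wlock'
    # before 'lock' and release in reverse order, e.g.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)
    #
    # Acquiring them in the opposite order risks deadlock against callers
    # that follow the documented order (the devel warning above flags this).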

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
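
    # For orientation (sketch, not new behavior): when copy information is
    # recorded above, the new filelog revision carries it as metadata,
    # conceptually of the form
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node>'}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.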

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
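
    # Minimal usage sketch (illustrative; the real entry point is the
    # commit command in commands.py): committing everything in the working
    # directory with a hypothetical message and user, assuming `repo` is
    # an open localrepository:
    #
    #   node = repo.commit(text='fix the frobnicator', user='a@b.c')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')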

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
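
    # Illustrative calls (hypothetical branch name): newest-first heads of
    # a named branch, excluding closed heads by default:
    #
    #   heads = repo.branchheads('stable')                  # open heads only
    #   all_heads = repo.branchheads('stable', closed=True) # include closed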

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1742
1741
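    # Illustration (not part of the original file): a self-contained rerun
    # of the sampling above, with a plain dict as a hypothetical node ->
    # first-parent map and None marking the root:
    #
    #   def sample_chain(top, bottom, first_parent):
    #       n, picked, i, f = top, [], 0, 1
    #       while n != bottom and n is not None:
    #           p = first_parent[n]
    #           if i == f:          # pick nodes 1, 2, 4, ... steps below top
    #               picked.append(n)
    #               f *= 2
    #           n = p
    #           i += 1
    #       return picked
    #
    #   sample_chain('e', 'a', {'e': 'd', 'd': 'c', 'c': 'b', 'b': 'a'})
    #   # -> ['d', 'c']
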
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

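    # Illustration (not part of the original file): a hedged sketch of an
    # extension vetoing pushes by overriding checkpush() from its
    # reposetup(); the 'myext.blockpush' config knob is made up.
    #
    #   def reposetup(ui, repo):
    #       class vetorepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(vetorepo, self).checkpush(pushop)
    #               if self.ui.configbool('myext', 'blockpush'):
    #                   raise util.Abort(_('pushing is disabled here'))
    #       repo.__class__ = vetorepo
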
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

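    # Illustration (not part of the original file): a hedged sketch of
    # registering one of these prepush functions; util.hooks.add() takes a
    # source name and a callable, and the signature follows the docstring.
    #
    #   def warnbigpush(repo, remote, outgoing):
    #       if len(outgoing.missing) > 100:
    #           repo.ui.warn(_('pushing %d changesets\n')
    #                        % len(outgoing.missing))
    #
    #   def reposetup(ui, repo):
    #       repo.prepushoutgoinghooks.add('myext', warnbigpush)
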
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

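    # Illustration (not part of the original file): the wire format consumed
    # above is a status line, a "total_files total_bytes" line, then one
    # "name\0size" header per file followed by exactly size raw bytes. A
    # minimal standalone parser (error handling omitted, payload made up):
    #
    #   import io
    #
    #   def parse_stream(fp):
    #       assert int(fp.readline()) == 0      # 0 means "OK, stream follows"
    #       total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    #       for i in xrange(total_files):
    #           name, size = fp.readline().split('\0', 1)
    #           yield name, fp.read(int(size))  # raw bytes follow the header
    #
    #   payload = '0\n2 11\ndata/a.i\x005\nhellodata/b.i\x006\nworld!'
    #   files = dict(parse_stream(io.BytesIO(payload)))
    #   # files == {'data/a.i': 'hello', 'data/b.i': 'world!'}
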
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

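    # Illustration (not part of the original file): the streamreqs check
    # above reduces to a set comparison -- stream only when every format
    # requirement the server advertises is one we support ('fancydelta' is
    # an invented requirement name):
    #
    #   def can_stream(streamreqs, supportedformats):
    #       # an empty difference means no unsupported format requirements
    #       return not set(streamreqs.split(',')) - supportedformats
    #
    #   can_stream('revlogv1', set(['revlogv1', 'generaldelta']))  # True
    #   can_stream('revlogv1,fancydelta', set(['revlogv1']))       # False
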
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

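    # Illustration (not part of the original file): pushkey is the generic
    # key/value exchange behind bookmarks and phases. A hedged sketch of
    # moving a bookmark through it; the name and hex node are placeholders,
    # and an empty old value is assumed to mean "create the key":
    #
    #   ok = repo.pushkey('bookmarks', 'feature-x', '', newnodehex)
    #   # ok is True on success; a prepushkey hook may veto, returning False
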
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

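    # Illustration (not part of the original file): a hedged usage sketch --
    # the saved text survives an aborted commit in .hg/last-message.txt, and
    # the return value is a printable path to it:
    #
    #   msgpath = repo.savecommitmessage('WIP: refactor dirstate')
    #   repo.ui.status('commit message saved in %s\n' % msgpath)
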
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

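# Illustration (not part of the original file): aftertrans returns a plain
# closure rather than a repo method so the transaction holds no reference
# back to the repository. A standalone run with a stub vfs:
#
#   class stubvfs(object):
#       def rename(self, src, dest):
#           print 'rename %s -> %s' % (src, dest)
#
#   finisher = aftertrans([(stubvfs(), 'journal', 'undo')])
#   finisher()      # prints: rename journal -> undo
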
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

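# Example (not part of the original file): undoname maps a transaction's
# journal file to its post-transaction undo counterpart:
#
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
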
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True