# Scraped review-site header (reconstructed):
# localrepo.py - from Mercurial changeset r25629:52e5f68d (default branch)
# "devel-warn: move the develwarn function as a method of the ui object"
# Author: Pierre-Yves David
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect, random
19 import weakref, errno, os, time, inspect, random
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # always read the cached value from the unfiltered repository
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        # always store the cached value on the unfiltered repository
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        # always invalidate on the unfiltered repository
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35
35
class storecache(repofilecache):
    """filecache for files in the store"""

    def join(self, obj, fname):
        # store files live under .hg/store; resolve through sjoin
        return obj.sjoin(fname)
40
40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # filtered view: read the attribute cached on the unfiltered
            # repository instead of caching anything on the view itself
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
49
49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override so the computed value lands
        # directly in the instance dictionary
        object.__setattr__(obj, self.name, value)
55
55
56
56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60
60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66
66
# capabilities advertised by modern local peers; legacy peers additionally
# support the old 'changegroupsubset' wire command
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' filtered view so secret/hidden changesets do
        # not leak through the peer interface
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # local peers expose the underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original failure after flushing the output
                raise
        except error.PushRaced, exc:
            # translate a push race into the wire-protocol error callers expect
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds 'changegroupsubset')
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        # legacy discovery primitive
        return self._repo.branches(nodes)

    def between(self, pairs):
        # legacy discovery primitive
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # full changegroup generation (legacy wire protocol)
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        # partial changegroup generation (legacy wire protocol)
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
class localrepository(object):

    # repository format requirements this class knows how to handle
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # full set of supported requirements, including storage-level ones
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store opener as options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter applied to this instance (None = unfiltered)
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return ['revlogv1']
207 return ['revlogv1']
208
208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        baseui: ui object whose configuration is copied; path: working
        directory root. Raises error.RepoError when the repository is
        missing (create=False) or already exists (create=True).
        """
        self.requirements = set()
        # vfs rooted at the working directory
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # vfs rooted at .hg
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is not an error
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions coming from enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                # NOTE(review): nesting below reconstructed from stripped
                # indentation — fncache/dotencode and the dummy changelog are
                # assumed to live inside the 'usestore' branch; confirm
                # against upstream localrepo.py
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            # a shared repository keeps its store under the source repo's
            # path, recorded in .hg/sharedpath
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
327
327
328 def close(self):
328 def close(self):
329 self._writecaches()
329 self._writecaches()
330
330
331 def _writecaches(self):
331 def _writecaches(self):
332 if self._revbranchcache:
332 if self._revbranchcache:
333 self._revbranchcache.write()
333 self._revbranchcache.write()
334
334
335 def _restrictcapabilities(self, caps):
335 def _restrictcapabilities(self, caps):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 caps = set(caps)
337 caps = set(caps)
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 caps.add('bundle2=' + urllib.quote(capsblob))
339 caps.add('bundle2=' + urllib.quote(capsblob))
340 return caps
340 return caps
341
341
342 def _applyopenerreqs(self):
342 def _applyopenerreqs(self):
343 self.svfs.options = dict((r, 1) for r in self.requirements
343 self.svfs.options = dict((r, 1) for r in self.requirements
344 if r in self.openerreqs)
344 if r in self.openerreqs)
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 if chunkcachesize is not None:
346 if chunkcachesize is not None:
347 self.svfs.options['chunkcachesize'] = chunkcachesize
347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 if maxchainlen is not None:
349 if maxchainlen is not None:
350 self.svfs.options['maxchainlen'] = maxchainlen
350 self.svfs.options['maxchainlen'] = maxchainlen
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 if manifestcachesize is not None:
352 if manifestcachesize is not None:
353 self.svfs.options['manifestcachesize'] = manifestcachesize
353 self.svfs.options['manifestcachesize'] = manifestcachesize
354
354
355 def _writerequirements(self):
355 def _writerequirements(self):
356 scmutil.writerequires(self.vfs, self.requirements)
356 scmutil.writerequires(self.vfs, self.requirements)
357
357
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            # not under this repository at all
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is exactly a registered subrepo: legal
                    return True
                else:
                    # path is inside a subrepo; let that subrepo decide
                    # about the remainder of the path
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk one path component up and retry
                parts.pop()
        return False
395
395
396 def peer(self):
396 def peer(self):
397 return localpeer(self) # not cached to avoid reference cycle
397 return localpeer(self) # not cached to avoid reference cycle
398
398
399 def unfiltered(self):
399 def unfiltered(self):
400 """Return unfiltered version of the repository
400 """Return unfiltered version of the repository
401
401
402 Intended to be overwritten by filtered repo."""
402 Intended to be overwritten by filtered repo."""
403 return self
403 return self
404
404
405 def filtered(self, name):
405 def filtered(self, name):
406 """Return a filtered version of a repository"""
406 """Return a filtered version of a repository"""
407 # build a new class with the mixin and the current class
407 # build a new class with the mixin and the current class
408 # (possibly subclass of the repo)
408 # (possibly subclass of the repo)
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 pass
410 pass
411 return proxycls(self, name)
411 return proxycls(self, name)
412
412
413 @repofilecache('bookmarks')
413 @repofilecache('bookmarks')
414 def _bookmarks(self):
414 def _bookmarks(self):
415 return bookmarks.bmstore(self)
415 return bookmarks.bmstore(self)
416
416
417 @repofilecache('bookmarks.current')
417 @repofilecache('bookmarks.current')
418 def _activebookmark(self):
418 def _activebookmark(self):
419 return bookmarks.readactive(self)
419 return bookmarks.readactive(self)
420
420
421 def bookmarkheads(self, bookmark):
421 def bookmarkheads(self, bookmark):
422 name = bookmark.split('@', 1)[0]
422 name = bookmark.split('@', 1)[0]
423 heads = []
423 heads = []
424 for mark, n in self._bookmarks.iteritems():
424 for mark, n in self._bookmarks.iteritems():
425 if mark.split('@', 1)[0] == name:
425 if mark.split('@', 1)[0] == name:
426 heads.append(n)
426 heads.append(n)
427 return heads
427 return heads
428
428
429 @storecache('phaseroots')
429 @storecache('phaseroots')
430 def _phasecache(self):
430 def _phasecache(self):
431 return phases.phasecache(self, self._phasedefaults)
431 return phases.phasecache(self, self._phasedefaults)
432
432
    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store (changeset evolution data)
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # markers may only be created when the feature is enabled; otherwise
        # the store is opened read-only
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
449
449
450 @storecache('00changelog.i')
450 @storecache('00changelog.i')
451 def changelog(self):
451 def changelog(self):
452 c = changelog.changelog(self.svfs)
452 c = changelog.changelog(self.svfs)
453 if 'HG_PENDING' in os.environ:
453 if 'HG_PENDING' in os.environ:
454 p = os.environ['HG_PENDING']
454 p = os.environ['HG_PENDING']
455 if p.startswith(self.root):
455 if p.startswith(self.root):
456 c.readpending('00changelog.i.a')
456 c.readpending('00changelog.i.a')
457 return c
457 return c
458
458
459 @storecache('00manifest.i')
459 @storecache('00manifest.i')
460 def manifest(self):
460 def manifest(self):
461 return manifest.manifest(self.svfs)
461 return manifest.manifest(self.svfs)
462
462
463 def dirlog(self, dir):
463 def dirlog(self, dir):
464 return self.manifest.dirlog(dir)
464 return self.manifest.dirlog(dir)
465
465
    @repofilecache('dirstate')
    def dirstate(self):
        # one-element list used as a mutable cell so the closure below can
        # record that the warning was already issued
        warned = [0]
        def validate(node):
            # map an unknown working-directory parent to nullid (warning
            # once) instead of aborting, so a stale dirstate stays usable
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481
481
482 def __getitem__(self, changeid):
482 def __getitem__(self, changeid):
483 if changeid is None:
483 if changeid is None:
484 return context.workingctx(self)
484 return context.workingctx(self)
485 if isinstance(changeid, slice):
485 if isinstance(changeid, slice):
486 return [context.changectx(self, i)
486 return [context.changectx(self, i)
487 for i in xrange(*changeid.indices(len(self)))
487 for i in xrange(*changeid.indices(len(self)))
488 if i not in self.changelog.filteredrevs]
488 if i not in self.changelog.filteredrevs]
489 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
490
490
491 def __contains__(self, changeid):
491 def __contains__(self, changeid):
492 try:
492 try:
493 self[changeid]
493 self[changeid]
494 return True
494 return True
495 except error.RepoLookupError:
495 except error.RepoLookupError:
496 return False
496 return False
497
497
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no
        # changesets (``len(repo) == 0``)
        return True
500
500
    def __len__(self):
        """Number of revisions in the (unfiltered) changelog."""
        return len(self.changelog)
503
503
    def __iter__(self):
        """Iterate over revision numbers, delegating to the changelog."""
        return iter(self.changelog)
506
506
507 def revs(self, expr, *args):
507 def revs(self, expr, *args):
508 '''Return a list of revisions matching the given revset'''
508 '''Return a list of revisions matching the given revset'''
509 expr = revset.formatspec(expr, *args)
509 expr = revset.formatspec(expr, *args)
510 m = revset.match(None, expr)
510 m = revset.match(None, expr)
511 return m(self)
511 return m(self)
512
512
513 def set(self, expr, *args):
513 def set(self, expr, *args):
514 '''
514 '''
515 Yield a context for each matching revision, after doing arg
515 Yield a context for each matching revision, after doing arg
516 replacement via revset.formatspec
516 replacement via revset.formatspec
517 '''
517 '''
518 for r in self.revs(expr, *args):
518 for r in self.revs(expr, *args):
519 yield self[r]
519 yield self[r]
520
520
    def url(self):
        """Return this repository's URL (``file:`` + repo root)."""
        return 'file:' + self.root
523
523
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        # ``throw`` and the extra keyword args are forwarded unchanged
        return hook.hook(self.ui, self, name, throw, **args)
532
532
533 @unfilteredmethod
533 @unfilteredmethod
534 def _tag(self, names, node, message, local, user, date, extra={},
534 def _tag(self, names, node, message, local, user, date, extra={},
535 editor=False):
535 editor=False):
536 if isinstance(names, str):
536 if isinstance(names, str):
537 names = (names,)
537 names = (names,)
538
538
539 branches = self.branchmap()
539 branches = self.branchmap()
540 for name in names:
540 for name in names:
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 local=local)
542 local=local)
543 if name in branches:
543 if name in branches:
544 self.ui.warn(_("warning: tag %s conflicts with existing"
544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 " branch name\n") % name)
545 " branch name\n") % name)
546
546
547 def writetags(fp, names, munge, prevtags):
547 def writetags(fp, names, munge, prevtags):
548 fp.seek(0, 2)
548 fp.seek(0, 2)
549 if prevtags and prevtags[-1] != '\n':
549 if prevtags and prevtags[-1] != '\n':
550 fp.write('\n')
550 fp.write('\n')
551 for name in names:
551 for name in names:
552 if munge:
552 if munge:
553 m = munge(name)
553 m = munge(name)
554 else:
554 else:
555 m = name
555 m = name
556
556
557 if (self._tagscache.tagtypes and
557 if (self._tagscache.tagtypes and
558 name in self._tagscache.tagtypes):
558 name in self._tagscache.tagtypes):
559 old = self.tags().get(name, nullid)
559 old = self.tags().get(name, nullid)
560 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(node), m))
561 fp.write('%s %s\n' % (hex(node), m))
562 fp.close()
562 fp.close()
563
563
564 prevtags = ''
564 prevtags = ''
565 if local:
565 if local:
566 try:
566 try:
567 fp = self.vfs('localtags', 'r+')
567 fp = self.vfs('localtags', 'r+')
568 except IOError:
568 except IOError:
569 fp = self.vfs('localtags', 'a')
569 fp = self.vfs('localtags', 'a')
570 else:
570 else:
571 prevtags = fp.read()
571 prevtags = fp.read()
572
572
573 # local tags are stored in the current charset
573 # local tags are stored in the current charset
574 writetags(fp, names, None, prevtags)
574 writetags(fp, names, None, prevtags)
575 for name in names:
575 for name in names:
576 self.hook('tag', node=hex(node), tag=name, local=local)
576 self.hook('tag', node=hex(node), tag=name, local=local)
577 return
577 return
578
578
579 try:
579 try:
580 fp = self.wfile('.hgtags', 'rb+')
580 fp = self.wfile('.hgtags', 'rb+')
581 except IOError, e:
581 except IOError, e:
582 if e.errno != errno.ENOENT:
582 if e.errno != errno.ENOENT:
583 raise
583 raise
584 fp = self.wfile('.hgtags', 'ab')
584 fp = self.wfile('.hgtags', 'ab')
585 else:
585 else:
586 prevtags = fp.read()
586 prevtags = fp.read()
587
587
588 # committed tags are stored in UTF-8
588 # committed tags are stored in UTF-8
589 writetags(fp, names, encoding.fromlocal, prevtags)
589 writetags(fp, names, encoding.fromlocal, prevtags)
590
590
591 fp.close()
591 fp.close()
592
592
593 self.invalidatecaches()
593 self.invalidatecaches()
594
594
595 if '.hgtags' not in self.dirstate:
595 if '.hgtags' not in self.dirstate:
596 self[None].add(['.hgtags'])
596 self[None].add(['.hgtags'])
597
597
598 m = matchmod.exact(self.root, '', ['.hgtags'])
598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 editor=editor)
600 editor=editor)
601
601
602 for name in names:
602 for name in names:
603 self.hook('tag', node=hex(node), tag=name, local=local)
603 self.hook('tag', node=hex(node), tag=name, local=local)
604
604
605 return tagnode
605 return tagnode
606
606
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags has uncommitted (or unknown/
            # ignored) modifications: the commit below would pick them up
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
636
636
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # filled in lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
659
659
660 def tags(self):
660 def tags(self):
661 '''return a mapping of tag to node'''
661 '''return a mapping of tag to node'''
662 t = {}
662 t = {}
663 if self.changelog.filteredrevs:
663 if self.changelog.filteredrevs:
664 tags, tt = self._findtags()
664 tags, tt = self._findtags()
665 else:
665 else:
666 tags = self._tagscache.tags
666 tags = self._tagscache.tags
667 for k, v in tags.iteritems():
667 for k, v in tags.iteritems():
668 try:
668 try:
669 # ignore tags to unknown nodes
669 # ignore tags to unknown nodes
670 self.changelog.rev(v)
670 self.changelog.rev(v)
671 t[k] = v
671 t[k] = v
672 except (error.LookupError, ValueError):
672 except (error.LookupError, ValueError):
673 pass
673 pass
674 return t
674 return t
675
675
676 def _findtags(self):
676 def _findtags(self):
677 '''Do the hard work of finding tags. Return a pair of dicts
677 '''Do the hard work of finding tags. Return a pair of dicts
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 maps tag name to a string like \'global\' or \'local\'.
679 maps tag name to a string like \'global\' or \'local\'.
680 Subclasses or extensions are free to add their own tags, but
680 Subclasses or extensions are free to add their own tags, but
681 should be aware that the returned dicts will be retained for the
681 should be aware that the returned dicts will be retained for the
682 duration of the localrepo object.'''
682 duration of the localrepo object.'''
683
683
684 # XXX what tagtype should subclasses/extensions use? Currently
684 # XXX what tagtype should subclasses/extensions use? Currently
685 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # Should each extension invent its own tag type? Should there
686 # Should each extension invent its own tag type? Should there
687 # be one tagtype for all such "virtual" tags? Or is the status
687 # be one tagtype for all such "virtual" tags? Or is the status
688 # quo fine?
688 # quo fine?
689
689
690 alltags = {} # map tag name to (node, hist)
690 alltags = {} # map tag name to (node, hist)
691 tagtypes = {}
691 tagtypes = {}
692
692
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695
695
696 # Build the return dicts. Have to re-encode tag names because
696 # Build the return dicts. Have to re-encode tag names because
697 # the tags module always uses UTF-8 (in order not to lose info
697 # the tags module always uses UTF-8 (in order not to lose info
698 # writing to the cache), but the rest of Mercurial wants them in
698 # writing to the cache), but the rest of Mercurial wants them in
699 # local encoding.
699 # local encoding.
700 tags = {}
700 tags = {}
701 for (name, (node, hist)) in alltags.iteritems():
701 for (name, (node, hist)) in alltags.iteritems():
702 if node != nullid:
702 if node != nullid:
703 tags[encoding.tolocal(name)] = node
703 tags[encoding.tolocal(name)] = node
704 tags['tip'] = self.changelog.tip()
704 tags['tip'] = self.changelog.tip()
705 tagtypes = dict([(encoding.tolocal(name), value)
705 tagtypes = dict([(encoding.tolocal(name), value)
706 for (name, value) in tagtypes.iteritems()])
706 for (name, value) in tagtypes.iteritems()])
707 return (tags, tagtypes)
707 return (tags, tagtypes)
708
708
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
719
719
720 def tagslist(self):
720 def tagslist(self):
721 '''return a list of tags ordered by revision'''
721 '''return a list of tags ordered by revision'''
722 if not self._tagscache.tagslist:
722 if not self._tagscache.tagslist:
723 l = []
723 l = []
724 for t, n in self.tags().iteritems():
724 for t, n in self.tags().iteritems():
725 l.append((self.changelog.rev(n), t, n))
725 l.append((self.changelog.rev(n), t, n))
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727
727
728 return self._tagscache.tagslist
728 return self._tagscache.tagslist
729
729
730 def nodetags(self, node):
730 def nodetags(self, node):
731 '''return the tags associated with a node'''
731 '''return the tags associated with a node'''
732 if not self._tagscache.nodetagscache:
732 if not self._tagscache.nodetagscache:
733 nodetagscache = {}
733 nodetagscache = {}
734 for t, n in self._tagscache.tags.iteritems():
734 for t, n in self._tagscache.tags.iteritems():
735 nodetagscache.setdefault(n, []).append(t)
735 nodetagscache.setdefault(n, []).append(t)
736 for tags in nodetagscache.itervalues():
736 for tags in nodetagscache.itervalues():
737 tags.sort()
737 tags.sort()
738 self._tagscache.nodetagscache = nodetagscache
738 self._tagscache.nodetagscache = nodetagscache
739 return self._tagscache.nodetagscache.get(node, [])
739 return self._tagscache.nodetagscache.get(node, [])
740
740
741 def nodebookmarks(self, node):
741 def nodebookmarks(self, node):
742 marks = []
742 marks = []
743 for bookmark, n in self._bookmarks.iteritems():
743 for bookmark, n in self._bookmarks.iteritems():
744 if n == node:
744 if n == node:
745 marks.append(bookmark)
745 marks.append(bookmark)
746 return sorted(marks)
746 return sorted(marks)
747
747
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
753
753
    @unfilteredmethod
    def revbranchcache(self):
        """Return the rev -> branch cache, creating it lazily on the
        unfiltered repository."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
759
759
760 def branchtip(self, branch, ignoremissing=False):
760 def branchtip(self, branch, ignoremissing=False):
761 '''return the tip node for a given branch
761 '''return the tip node for a given branch
762
762
763 If ignoremissing is True, then this method will not raise an error.
763 If ignoremissing is True, then this method will not raise an error.
764 This is helpful for callers that only expect None for a missing branch
764 This is helpful for callers that only expect None for a missing branch
765 (e.g. namespace).
765 (e.g. namespace).
766
766
767 '''
767 '''
768 try:
768 try:
769 return self.branchmap().branchtip(branch)
769 return self.branchmap().branchtip(branch)
770 except KeyError:
770 except KeyError:
771 if not ignoremissing:
771 if not ignoremissing:
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 else:
773 else:
774 pass
774 pass
775
775
    def lookup(self, key):
        """Resolve ``key`` to a changeset node."""
        return self[key].node()
778
778
779 def lookupbranch(self, key, remote=None):
779 def lookupbranch(self, key, remote=None):
780 repo = remote or self
780 repo = remote or self
781 if key in repo.branchmap():
781 if key in repo.branchmap():
782 return key
782 return key
783
783
784 repo = (remote and remote.local()) and remote or self
784 repo = (remote and remote.local()) and remote or self
785 return repo[key].branch()
785 return repo[key].branch()
786
786
787 def known(self, nodes):
787 def known(self, nodes):
788 nm = self.changelog.nodemap
788 nm = self.changelog.nodemap
789 pc = self._phasecache
789 pc = self._phasecache
790 result = []
790 result = []
791 for n in nodes:
791 for n in nodes:
792 r = nm.get(n)
792 r = nm.get(n)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 result.append(resp)
794 result.append(resp)
795 return result
795 return result
796
796
    def local(self):
        # this repository is local; peers return None/False here
        return self
799
799
    def publishing(self):
        """True when this repository publishes changesets (phases.publish,
        default True)."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
804
804
805 def cancopy(self):
805 def cancopy(self):
806 # so statichttprepo's override of local() works
806 # so statichttprepo's override of local() works
807 if not self.local():
807 if not self.local():
808 return False
808 return False
809 if not self.publishing():
809 if not self.publishing():
810 return True
810 return True
811 # if publishing we can't copy if there is filtered content
811 # if publishing we can't copy if there is filtered content
812 return not self.filtered('visible').changelog.filteredrevs
812 return not self.filtered('visible').changelog.filteredrevs
813
813
814 def shared(self):
814 def shared(self):
815 '''the type of shared repository (None if not shared)'''
815 '''the type of shared repository (None if not shared)'''
816 if self.sharedpath != self.path:
816 if self.sharedpath != self.path:
817 return 'store'
817 return 'store'
818 return None
818 return None
819
819
    def join(self, f, *insidef):
        """Join path components below the .hg directory."""
        return self.vfs.join(os.path.join(f, *insidef))
822
822
    def wjoin(self, f, *insidef):
        """Join path components below the working directory root."""
        return self.vfs.reljoin(self.root, f, *insidef)
825
825
    def file(self, f):
        """Return the filelog for tracked file ``f``; a single leading
        '/' is stripped."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
830
830
    def changectx(self, changeid):
        # alias for __getitem__
        return self[changeid]
833
833
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
837
837
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to ``p1``/``p2`` and fix up copy
        records accordingly."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # no merge in progress: drop copy records whose source and
            # destination are both unknown to the first parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
854
854
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
859
859
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
862
862
    def pathto(self, f, cwd=None):
        """Return repo path ``f`` relative to ``cwd`` (dirstate logic)."""
        return self.dirstate.pathto(f, cwd)
865
865
    def wfile(self, f, mode='r'):
        """Open file ``f`` in the working directory."""
        return self.wvfs(f, mode)
868
868
    def _link(self, f):
        """True when working-directory file ``f`` is a symlink."""
        return self.wvfs.islink(f)
871
871
872 def _loadfilter(self, filter):
872 def _loadfilter(self, filter):
873 if filter not in self.filterpats:
873 if filter not in self.filterpats:
874 l = []
874 l = []
875 for pat, cmd in self.ui.configitems(filter):
875 for pat, cmd in self.ui.configitems(filter):
876 if cmd == '!':
876 if cmd == '!':
877 continue
877 continue
878 mf = matchmod.match(self.root, '', [pat])
878 mf = matchmod.match(self.root, '', [pat])
879 fn = None
879 fn = None
880 params = cmd
880 params = cmd
881 for name, filterfn in self._datafilters.iteritems():
881 for name, filterfn in self._datafilters.iteritems():
882 if cmd.startswith(name):
882 if cmd.startswith(name):
883 fn = filterfn
883 fn = filterfn
884 params = cmd[len(name):].lstrip()
884 params = cmd[len(name):].lstrip()
885 break
885 break
886 if not fn:
886 if not fn:
887 fn = lambda s, c, **kwargs: util.filter(s, c)
887 fn = lambda s, c, **kwargs: util.filter(s, c)
888 # Wrap old filters not supporting keyword arguments
888 # Wrap old filters not supporting keyword arguments
889 if not inspect.getargspec(fn)[2]:
889 if not inspect.getargspec(fn)[2]:
890 oldfn = fn
890 oldfn = fn
891 fn = lambda s, c, **kwargs: oldfn(s, c)
891 fn = lambda s, c, **kwargs: oldfn(s, c)
892 l.append((mf, fn, params))
892 l.append((mf, fn, params))
893 self.filterpats[filter] = l
893 self.filterpats[filter] = l
894 return self.filterpats[filter]
894 return self.filterpats[filter]
895
895
896 def _filter(self, filterpats, filename, data):
896 def _filter(self, filterpats, filename, data):
897 for mf, fn, cmd in filterpats:
897 for mf, fn, cmd in filterpats:
898 if mf(filename):
898 if mf(filename):
899 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
899 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
900 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
900 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
901 break
901 break
902
902
903 return data
903 return data
904
904
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')
908
908
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')
912
912
    def adddatafilter(self, name, filter):
        """Register data filter ``filter`` under ``name`` (used by
        _loadfilter when resolving configured filter commands)."""
        self._datafilters[name] = filter
915
915
916 def wread(self, filename):
916 def wread(self, filename):
917 if self._link(filename):
917 if self._link(filename):
918 data = self.wvfs.readlink(filename)
918 data = self.wvfs.readlink(filename)
919 else:
919 else:
920 data = self.wvfs.read(filename)
920 data = self.wvfs.read(filename)
921 return self._filter(self._encodefilterpats, filename, data)
921 return self._filter(self._encodefilterpats, filename, data)
922
922
923 def wwrite(self, filename, data, flags):
923 def wwrite(self, filename, data, flags):
924 """write ``data`` into ``filename`` in the working directory
924 """write ``data`` into ``filename`` in the working directory
925
925
926 This returns length of written (maybe decoded) data.
926 This returns length of written (maybe decoded) data.
927 """
927 """
928 data = self._filter(self._decodefilterpats, filename, data)
928 data = self._filter(self._decodefilterpats, filename, data)
929 if 'l' in flags:
929 if 'l' in flags:
930 self.wvfs.symlink(data, filename)
930 self.wvfs.symlink(data, filename)
931 else:
931 else:
932 self.wvfs.write(filename, data)
932 self.wvfs.write(filename, data)
933 if 'x' in flags:
933 if 'x' in flags:
934 self.wvfs.setflags(filename, False, True)
934 self.wvfs.setflags(filename, False, True)
935 return len(data)
935 return len(data)
936
936
    def wwritedata(self, filename, data):
        """Apply the decode filters to ``data`` without writing it."""
        return self._filter(self._decodefilterpats, filename, data)
939
939
940 def currenttransaction(self):
940 def currenttransaction(self):
941 """return the current transaction or None if non exists"""
941 """return the current transaction or None if non exists"""
942 if self._transref:
942 if self._transref:
943 tr = self._transref()
943 tr = self._transref()
944 else:
944 else:
945 tr = None
945 tr = None
946
946
947 if tr and tr.running():
947 if tr and tr.running():
948 return tr
948 return tr
949 return None
949 return None
950
950
    def transaction(self, desc, report=None):
        """Open and return a new transaction named ``desc``.

        If a transaction is already running, return a nested transaction
        instead.  ``report`` optionally overrides the function used to
        report recovery messages (defaults to ui.warn).
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            # developer aid: opening a transaction without holding the
            # store lock is a bug in the caller
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique transaction id, exposed to hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr
1014
1014
1015 def _journalfiles(self):
1015 def _journalfiles(self):
1016 return ((self.svfs, 'journal'),
1016 return ((self.svfs, 'journal'),
1017 (self.vfs, 'journal.dirstate'),
1017 (self.vfs, 'journal.dirstate'),
1018 (self.vfs, 'journal.branch'),
1018 (self.vfs, 'journal.branch'),
1019 (self.vfs, 'journal.desc'),
1019 (self.vfs, 'journal.desc'),
1020 (self.vfs, 'journal.bookmarks'),
1020 (self.vfs, 'journal.bookmarks'),
1021 (self.svfs, 'journal.phaseroots'))
1021 (self.svfs, 'journal.phaseroots'))
1022
1022
1023 def undofiles(self):
1023 def undofiles(self):
1024 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1024 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1025
1025
1026 def _writejournal(self, desc):
1026 def _writejournal(self, desc):
1027 self.vfs.write("journal.dirstate",
1027 self.vfs.write("journal.dirstate",
1028 self.vfs.tryread("dirstate"))
1028 self.vfs.tryread("dirstate"))
1029 self.vfs.write("journal.branch",
1029 self.vfs.write("journal.branch",
1030 encoding.fromlocal(self.dirstate.branch()))
1030 encoding.fromlocal(self.dirstate.branch()))
1031 self.vfs.write("journal.desc",
1031 self.vfs.write("journal.desc",
1032 "%d\n%s\n" % (len(self), desc))
1032 "%d\n%s\n" % (len(self), desc))
1033 self.vfs.write("journal.bookmarks",
1033 self.vfs.write("journal.bookmarks",
1034 self.vfs.tryread("bookmarks"))
1034 self.vfs.tryread("bookmarks"))
1035 self.svfs.write("journal.phaseroots",
1035 self.svfs.write("journal.phaseroots",
1036 self.svfs.tryread("phaseroots"))
1036 self.svfs.tryread("phaseroots"))
1037
1037
1038 def recover(self):
1038 def recover(self):
1039 lock = self.lock()
1039 lock = self.lock()
1040 try:
1040 try:
1041 if self.svfs.exists("journal"):
1041 if self.svfs.exists("journal"):
1042 self.ui.status(_("rolling back interrupted transaction\n"))
1042 self.ui.status(_("rolling back interrupted transaction\n"))
1043 vfsmap = {'': self.svfs,
1043 vfsmap = {'': self.svfs,
1044 'plain': self.vfs,}
1044 'plain': self.vfs,}
1045 transaction.rollback(self.svfs, vfsmap, "journal",
1045 transaction.rollback(self.svfs, vfsmap, "journal",
1046 self.ui.warn)
1046 self.ui.warn)
1047 self.invalidate()
1047 self.invalidate()
1048 return True
1048 return True
1049 else:
1049 else:
1050 self.ui.warn(_("no interrupted transaction available\n"))
1050 self.ui.warn(_("no interrupted transaction available\n"))
1051 return False
1051 return False
1052 finally:
1052 finally:
1053 lock.release()
1053 lock.release()
1054
1054
1055 def rollback(self, dryrun=False, force=False):
1055 def rollback(self, dryrun=False, force=False):
1056 wlock = lock = None
1056 wlock = lock = None
1057 try:
1057 try:
1058 wlock = self.wlock()
1058 wlock = self.wlock()
1059 lock = self.lock()
1059 lock = self.lock()
1060 if self.svfs.exists("undo"):
1060 if self.svfs.exists("undo"):
1061 return self._rollback(dryrun, force)
1061 return self._rollback(dryrun, force)
1062 else:
1062 else:
1063 self.ui.warn(_("no rollback information available\n"))
1063 self.ui.warn(_("no rollback information available\n"))
1064 return 1
1064 return 1
1065 finally:
1065 finally:
1066 release(lock, wlock)
1066 release(lock, wlock)
1067
1067
1068 @unfilteredmethod # Until we get smarter cache management
1068 @unfilteredmethod # Until we get smarter cache management
1069 def _rollback(self, dryrun, force):
1069 def _rollback(self, dryrun, force):
1070 ui = self.ui
1070 ui = self.ui
1071 try:
1071 try:
1072 args = self.vfs.read('undo.desc').splitlines()
1072 args = self.vfs.read('undo.desc').splitlines()
1073 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1073 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1074 if len(args) >= 3:
1074 if len(args) >= 3:
1075 detail = args[2]
1075 detail = args[2]
1076 oldtip = oldlen - 1
1076 oldtip = oldlen - 1
1077
1077
1078 if detail and ui.verbose:
1078 if detail and ui.verbose:
1079 msg = (_('repository tip rolled back to revision %s'
1079 msg = (_('repository tip rolled back to revision %s'
1080 ' (undo %s: %s)\n')
1080 ' (undo %s: %s)\n')
1081 % (oldtip, desc, detail))
1081 % (oldtip, desc, detail))
1082 else:
1082 else:
1083 msg = (_('repository tip rolled back to revision %s'
1083 msg = (_('repository tip rolled back to revision %s'
1084 ' (undo %s)\n')
1084 ' (undo %s)\n')
1085 % (oldtip, desc))
1085 % (oldtip, desc))
1086 except IOError:
1086 except IOError:
1087 msg = _('rolling back unknown transaction\n')
1087 msg = _('rolling back unknown transaction\n')
1088 desc = None
1088 desc = None
1089
1089
1090 if not force and self['.'] != self['tip'] and desc == 'commit':
1090 if not force and self['.'] != self['tip'] and desc == 'commit':
1091 raise util.Abort(
1091 raise util.Abort(
1092 _('rollback of last commit while not checked out '
1092 _('rollback of last commit while not checked out '
1093 'may lose data'), hint=_('use -f to force'))
1093 'may lose data'), hint=_('use -f to force'))
1094
1094
1095 ui.status(msg)
1095 ui.status(msg)
1096 if dryrun:
1096 if dryrun:
1097 return 0
1097 return 0
1098
1098
1099 parents = self.dirstate.parents()
1099 parents = self.dirstate.parents()
1100 self.destroying()
1100 self.destroying()
1101 vfsmap = {'plain': self.vfs, '': self.svfs}
1101 vfsmap = {'plain': self.vfs, '': self.svfs}
1102 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1102 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1103 if self.vfs.exists('undo.bookmarks'):
1103 if self.vfs.exists('undo.bookmarks'):
1104 self.vfs.rename('undo.bookmarks', 'bookmarks')
1104 self.vfs.rename('undo.bookmarks', 'bookmarks')
1105 if self.svfs.exists('undo.phaseroots'):
1105 if self.svfs.exists('undo.phaseroots'):
1106 self.svfs.rename('undo.phaseroots', 'phaseroots')
1106 self.svfs.rename('undo.phaseroots', 'phaseroots')
1107 self.invalidate()
1107 self.invalidate()
1108
1108
1109 parentgone = (parents[0] not in self.changelog.nodemap or
1109 parentgone = (parents[0] not in self.changelog.nodemap or
1110 parents[1] not in self.changelog.nodemap)
1110 parents[1] not in self.changelog.nodemap)
1111 if parentgone:
1111 if parentgone:
1112 self.vfs.rename('undo.dirstate', 'dirstate')
1112 self.vfs.rename('undo.dirstate', 'dirstate')
1113 try:
1113 try:
1114 branch = self.vfs.read('undo.branch')
1114 branch = self.vfs.read('undo.branch')
1115 self.dirstate.setbranch(encoding.tolocal(branch))
1115 self.dirstate.setbranch(encoding.tolocal(branch))
1116 except IOError:
1116 except IOError:
1117 ui.warn(_('named branch could not be reset: '
1117 ui.warn(_('named branch could not be reset: '
1118 'current branch is still \'%s\'\n')
1118 'current branch is still \'%s\'\n')
1119 % self.dirstate.branch())
1119 % self.dirstate.branch())
1120
1120
1121 self.dirstate.invalidate()
1121 self.dirstate.invalidate()
1122 parents = tuple([p.rev() for p in self.parents()])
1122 parents = tuple([p.rev() for p in self.parents()])
1123 if len(parents) > 1:
1123 if len(parents) > 1:
1124 ui.status(_('working directory now based on '
1124 ui.status(_('working directory now based on '
1125 'revisions %d and %d\n') % parents)
1125 'revisions %d and %d\n') % parents)
1126 else:
1126 else:
1127 ui.status(_('working directory now based on '
1127 ui.status(_('working directory now based on '
1128 'revision %d\n') % parents)
1128 'revision %d\n') % parents)
1129 ms = mergemod.mergestate(self)
1129 ms = mergemod.mergestate(self)
1130 ms.reset(self['.'].node())
1130 ms.reset(self['.'].node())
1131
1131
1132 # TODO: if we know which new heads may result from this rollback, pass
1132 # TODO: if we know which new heads may result from this rollback, pass
1133 # them to destroy(), which will prevent the branchhead cache from being
1133 # them to destroy(), which will prevent the branchhead cache from being
1134 # invalidated.
1134 # invalidated.
1135 self.destroyed()
1135 self.destroyed()
1136 return 0
1136 return 0
1137
1137
1138 def invalidatecaches(self):
1138 def invalidatecaches(self):
1139
1139
1140 if '_tagscache' in vars(self):
1140 if '_tagscache' in vars(self):
1141 # can't use delattr on proxy
1141 # can't use delattr on proxy
1142 del self.__dict__['_tagscache']
1142 del self.__dict__['_tagscache']
1143
1143
1144 self.unfiltered()._branchcaches.clear()
1144 self.unfiltered()._branchcaches.clear()
1145 self.invalidatevolatilesets()
1145 self.invalidatevolatilesets()
1146
1146
1147 def invalidatevolatilesets(self):
1147 def invalidatevolatilesets(self):
1148 self.filteredrevcache.clear()
1148 self.filteredrevcache.clear()
1149 obsolete.clearobscaches(self)
1149 obsolete.clearobscaches(self)
1150
1150
1151 def invalidatedirstate(self):
1151 def invalidatedirstate(self):
1152 '''Invalidates the dirstate, causing the next call to dirstate
1152 '''Invalidates the dirstate, causing the next call to dirstate
1153 to check if it was modified since the last time it was read,
1153 to check if it was modified since the last time it was read,
1154 rereading it if it has.
1154 rereading it if it has.
1155
1155
1156 This is different to dirstate.invalidate() that it doesn't always
1156 This is different to dirstate.invalidate() that it doesn't always
1157 rereads the dirstate. Use dirstate.invalidate() if you want to
1157 rereads the dirstate. Use dirstate.invalidate() if you want to
1158 explicitly read the dirstate again (i.e. restoring it to a previous
1158 explicitly read the dirstate again (i.e. restoring it to a previous
1159 known good state).'''
1159 known good state).'''
1160 if hasunfilteredcache(self, 'dirstate'):
1160 if hasunfilteredcache(self, 'dirstate'):
1161 for k in self.dirstate._filecache:
1161 for k in self.dirstate._filecache:
1162 try:
1162 try:
1163 delattr(self.dirstate, k)
1163 delattr(self.dirstate, k)
1164 except AttributeError:
1164 except AttributeError:
1165 pass
1165 pass
1166 delattr(self.unfiltered(), 'dirstate')
1166 delattr(self.unfiltered(), 'dirstate')
1167
1167
1168 def invalidate(self):
1168 def invalidate(self):
1169 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1169 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1170 for k in self._filecache:
1170 for k in self._filecache:
1171 # dirstate is invalidated separately in invalidatedirstate()
1171 # dirstate is invalidated separately in invalidatedirstate()
1172 if k == 'dirstate':
1172 if k == 'dirstate':
1173 continue
1173 continue
1174
1174
1175 try:
1175 try:
1176 delattr(unfiltered, k)
1176 delattr(unfiltered, k)
1177 except AttributeError:
1177 except AttributeError:
1178 pass
1178 pass
1179 self.invalidatecaches()
1179 self.invalidatecaches()
1180 self.store.invalidatecaches()
1180 self.store.invalidatecaches()
1181
1181
1182 def invalidateall(self):
1182 def invalidateall(self):
1183 '''Fully invalidates both store and non-store parts, causing the
1183 '''Fully invalidates both store and non-store parts, causing the
1184 subsequent operation to reread any outside changes.'''
1184 subsequent operation to reread any outside changes.'''
1185 # extension should hook this to invalidate its caches
1185 # extension should hook this to invalidate its caches
1186 self.invalidate()
1186 self.invalidate()
1187 self.invalidatedirstate()
1187 self.invalidatedirstate()
1188
1188
1189 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1189 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1190 try:
1190 try:
1191 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1191 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1192 except error.LockHeld, inst:
1192 except error.LockHeld, inst:
1193 if not wait:
1193 if not wait:
1194 raise
1194 raise
1195 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1195 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1196 (desc, inst.locker))
1196 (desc, inst.locker))
1197 # default to 600 seconds timeout
1197 # default to 600 seconds timeout
1198 l = lockmod.lock(vfs, lockname,
1198 l = lockmod.lock(vfs, lockname,
1199 int(self.ui.config("ui", "timeout", "600")),
1199 int(self.ui.config("ui", "timeout", "600")),
1200 releasefn, desc=desc)
1200 releasefn, desc=desc)
1201 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1201 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1202 if acquirefn:
1202 if acquirefn:
1203 acquirefn()
1203 acquirefn()
1204 return l
1204 return l
1205
1205
1206 def _afterlock(self, callback):
1206 def _afterlock(self, callback):
1207 """add a callback to be run when the repository is fully unlocked
1207 """add a callback to be run when the repository is fully unlocked
1208
1208
1209 The callback will be executed when the outermost lock is released
1209 The callback will be executed when the outermost lock is released
1210 (with wlock being higher level than 'lock')."""
1210 (with wlock being higher level than 'lock')."""
1211 for ref in (self._wlockref, self._lockref):
1211 for ref in (self._wlockref, self._lockref):
1212 l = ref and ref()
1212 l = ref and ref()
1213 if l and l.held:
1213 if l and l.held:
1214 l.postrelease.append(callback)
1214 l.postrelease.append(callback)
1215 break
1215 break
1216 else: # no lock have been found.
1216 else: # no lock have been found.
1217 callback()
1217 callback()
1218
1218
1219 def lock(self, wait=True):
1219 def lock(self, wait=True):
1220 '''Lock the repository store (.hg/store) and return a weak reference
1220 '''Lock the repository store (.hg/store) and return a weak reference
1221 to the lock. Use this before modifying the store (e.g. committing or
1221 to the lock. Use this before modifying the store (e.g. committing or
1222 stripping). If you are opening a transaction, get a lock as well.)
1222 stripping). If you are opening a transaction, get a lock as well.)
1223
1223
1224 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1224 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1225 'wlock' first to avoid a dead-lock hazard.'''
1225 'wlock' first to avoid a dead-lock hazard.'''
1226 l = self._lockref and self._lockref()
1226 l = self._lockref and self._lockref()
1227 if l is not None and l.held:
1227 if l is not None and l.held:
1228 l.lock()
1228 l.lock()
1229 return l
1229 return l
1230
1230
1231 def unlock():
1231 def unlock():
1232 for k, ce in self._filecache.items():
1232 for k, ce in self._filecache.items():
1233 if k == 'dirstate' or k not in self.__dict__:
1233 if k == 'dirstate' or k not in self.__dict__:
1234 continue
1234 continue
1235 ce.refresh()
1235 ce.refresh()
1236
1236
1237 l = self._lock(self.svfs, "lock", wait, unlock,
1237 l = self._lock(self.svfs, "lock", wait, unlock,
1238 self.invalidate, _('repository %s') % self.origroot)
1238 self.invalidate, _('repository %s') % self.origroot)
1239 self._lockref = weakref.ref(l)
1239 self._lockref = weakref.ref(l)
1240 return l
1240 return l
1241
1241
1242 def wlock(self, wait=True):
1242 def wlock(self, wait=True):
1243 '''Lock the non-store parts of the repository (everything under
1243 '''Lock the non-store parts of the repository (everything under
1244 .hg except .hg/store) and return a weak reference to the lock.
1244 .hg except .hg/store) and return a weak reference to the lock.
1245
1245
1246 Use this before modifying files in .hg.
1246 Use this before modifying files in .hg.
1247
1247
1248 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1248 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1249 'wlock' first to avoid a dead-lock hazard.'''
1249 'wlock' first to avoid a dead-lock hazard.'''
1250 l = self._wlockref and self._wlockref()
1250 l = self._wlockref and self._wlockref()
1251 if l is not None and l.held:
1251 if l is not None and l.held:
1252 l.lock()
1252 l.lock()
1253 return l
1253 return l
1254
1254
1255 # We do not need to check for non-waiting lock aquisition. Such
1255 # We do not need to check for non-waiting lock aquisition. Such
1256 # acquisition would not cause dead-lock as they would just fail.
1256 # acquisition would not cause dead-lock as they would just fail.
1257 if wait and (self.ui.configbool('devel', 'all-warnings')
1257 if wait and (self.ui.configbool('devel', 'all-warnings')
1258 or self.ui.configbool('devel', 'check-locks')):
1258 or self.ui.configbool('devel', 'check-locks')):
1259 l = self._lockref and self._lockref()
1259 l = self._lockref and self._lockref()
1260 if l is not None and l.held:
1260 if l is not None and l.held:
1261 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1261 self.ui.develwarn('"wlock" acquired after "lock"')
1262
1262
1263 def unlock():
1263 def unlock():
1264 if self.dirstate.pendingparentchange():
1264 if self.dirstate.pendingparentchange():
1265 self.dirstate.invalidate()
1265 self.dirstate.invalidate()
1266 else:
1266 else:
1267 self.dirstate.write()
1267 self.dirstate.write()
1268
1268
1269 self._filecache['dirstate'].refresh()
1269 self._filecache['dirstate'].refresh()
1270
1270
1271 l = self._lock(self.vfs, "wlock", wait, unlock,
1271 l = self._lock(self.vfs, "wlock", wait, unlock,
1272 self.invalidatedirstate, _('working directory of %s') %
1272 self.invalidatedirstate, _('working directory of %s') %
1273 self.origroot)
1273 self.origroot)
1274 self._wlockref = weakref.ref(l)
1274 self._wlockref = weakref.ref(l)
1275 return l
1275 return l
1276
1276
1277 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1277 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1278 """
1278 """
1279 commit an individual file as part of a larger transaction
1279 commit an individual file as part of a larger transaction
1280 """
1280 """
1281
1281
1282 fname = fctx.path()
1282 fname = fctx.path()
1283 fparent1 = manifest1.get(fname, nullid)
1283 fparent1 = manifest1.get(fname, nullid)
1284 fparent2 = manifest2.get(fname, nullid)
1284 fparent2 = manifest2.get(fname, nullid)
1285 if isinstance(fctx, context.filectx):
1285 if isinstance(fctx, context.filectx):
1286 node = fctx.filenode()
1286 node = fctx.filenode()
1287 if node in [fparent1, fparent2]:
1287 if node in [fparent1, fparent2]:
1288 self.ui.debug('reusing %s filelog entry\n' % fname)
1288 self.ui.debug('reusing %s filelog entry\n' % fname)
1289 return node
1289 return node
1290
1290
1291 flog = self.file(fname)
1291 flog = self.file(fname)
1292 meta = {}
1292 meta = {}
1293 copy = fctx.renamed()
1293 copy = fctx.renamed()
1294 if copy and copy[0] != fname:
1294 if copy and copy[0] != fname:
1295 # Mark the new revision of this file as a copy of another
1295 # Mark the new revision of this file as a copy of another
1296 # file. This copy data will effectively act as a parent
1296 # file. This copy data will effectively act as a parent
1297 # of this new revision. If this is a merge, the first
1297 # of this new revision. If this is a merge, the first
1298 # parent will be the nullid (meaning "look up the copy data")
1298 # parent will be the nullid (meaning "look up the copy data")
1299 # and the second one will be the other parent. For example:
1299 # and the second one will be the other parent. For example:
1300 #
1300 #
1301 # 0 --- 1 --- 3 rev1 changes file foo
1301 # 0 --- 1 --- 3 rev1 changes file foo
1302 # \ / rev2 renames foo to bar and changes it
1302 # \ / rev2 renames foo to bar and changes it
1303 # \- 2 -/ rev3 should have bar with all changes and
1303 # \- 2 -/ rev3 should have bar with all changes and
1304 # should record that bar descends from
1304 # should record that bar descends from
1305 # bar in rev2 and foo in rev1
1305 # bar in rev2 and foo in rev1
1306 #
1306 #
1307 # this allows this merge to succeed:
1307 # this allows this merge to succeed:
1308 #
1308 #
1309 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1309 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1310 # \ / merging rev3 and rev4 should use bar@rev2
1310 # \ / merging rev3 and rev4 should use bar@rev2
1311 # \- 2 --- 4 as the merge base
1311 # \- 2 --- 4 as the merge base
1312 #
1312 #
1313
1313
1314 cfname = copy[0]
1314 cfname = copy[0]
1315 crev = manifest1.get(cfname)
1315 crev = manifest1.get(cfname)
1316 newfparent = fparent2
1316 newfparent = fparent2
1317
1317
1318 if manifest2: # branch merge
1318 if manifest2: # branch merge
1319 if fparent2 == nullid or crev is None: # copied on remote side
1319 if fparent2 == nullid or crev is None: # copied on remote side
1320 if cfname in manifest2:
1320 if cfname in manifest2:
1321 crev = manifest2[cfname]
1321 crev = manifest2[cfname]
1322 newfparent = fparent1
1322 newfparent = fparent1
1323
1323
1324 # Here, we used to search backwards through history to try to find
1324 # Here, we used to search backwards through history to try to find
1325 # where the file copy came from if the source of a copy was not in
1325 # where the file copy came from if the source of a copy was not in
1326 # the parent directory. However, this doesn't actually make sense to
1326 # the parent directory. However, this doesn't actually make sense to
1327 # do (what does a copy from something not in your working copy even
1327 # do (what does a copy from something not in your working copy even
1328 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1328 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1329 # the user that copy information was dropped, so if they didn't
1329 # the user that copy information was dropped, so if they didn't
1330 # expect this outcome it can be fixed, but this is the correct
1330 # expect this outcome it can be fixed, but this is the correct
1331 # behavior in this circumstance.
1331 # behavior in this circumstance.
1332
1332
1333 if crev:
1333 if crev:
1334 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1334 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1335 meta["copy"] = cfname
1335 meta["copy"] = cfname
1336 meta["copyrev"] = hex(crev)
1336 meta["copyrev"] = hex(crev)
1337 fparent1, fparent2 = nullid, newfparent
1337 fparent1, fparent2 = nullid, newfparent
1338 else:
1338 else:
1339 self.ui.warn(_("warning: can't find ancestor for '%s' "
1339 self.ui.warn(_("warning: can't find ancestor for '%s' "
1340 "copied from '%s'!\n") % (fname, cfname))
1340 "copied from '%s'!\n") % (fname, cfname))
1341
1341
1342 elif fparent1 == nullid:
1342 elif fparent1 == nullid:
1343 fparent1, fparent2 = fparent2, nullid
1343 fparent1, fparent2 = fparent2, nullid
1344 elif fparent2 != nullid:
1344 elif fparent2 != nullid:
1345 # is one parent an ancestor of the other?
1345 # is one parent an ancestor of the other?
1346 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1346 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1347 if fparent1 in fparentancestors:
1347 if fparent1 in fparentancestors:
1348 fparent1, fparent2 = fparent2, nullid
1348 fparent1, fparent2 = fparent2, nullid
1349 elif fparent2 in fparentancestors:
1349 elif fparent2 in fparentancestors:
1350 fparent2 = nullid
1350 fparent2 = nullid
1351
1351
1352 # is the file changed?
1352 # is the file changed?
1353 text = fctx.data()
1353 text = fctx.data()
1354 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1354 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1355 changelist.append(fname)
1355 changelist.append(fname)
1356 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1356 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1357 # are just the flags changed during merge?
1357 # are just the flags changed during merge?
1358 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1358 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1359 changelist.append(fname)
1359 changelist.append(fname)
1360
1360
1361 return fparent1
1361 return fparent1
1362
1362
1363 @unfilteredmethod
1363 @unfilteredmethod
1364 def commit(self, text="", user=None, date=None, match=None, force=False,
1364 def commit(self, text="", user=None, date=None, match=None, force=False,
1365 editor=False, extra={}):
1365 editor=False, extra={}):
1366 """Add a new revision to current repository.
1366 """Add a new revision to current repository.
1367
1367
1368 Revision information is gathered from the working directory,
1368 Revision information is gathered from the working directory,
1369 match can be used to filter the committed files. If editor is
1369 match can be used to filter the committed files. If editor is
1370 supplied, it is called to get a commit message.
1370 supplied, it is called to get a commit message.
1371 """
1371 """
1372
1372
1373 def fail(f, msg):
1373 def fail(f, msg):
1374 raise util.Abort('%s: %s' % (f, msg))
1374 raise util.Abort('%s: %s' % (f, msg))
1375
1375
1376 if not match:
1376 if not match:
1377 match = matchmod.always(self.root, '')
1377 match = matchmod.always(self.root, '')
1378
1378
1379 if not force:
1379 if not force:
1380 vdirs = []
1380 vdirs = []
1381 match.explicitdir = vdirs.append
1381 match.explicitdir = vdirs.append
1382 match.bad = fail
1382 match.bad = fail
1383
1383
1384 wlock = self.wlock()
1384 wlock = self.wlock()
1385 try:
1385 try:
1386 wctx = self[None]
1386 wctx = self[None]
1387 merge = len(wctx.parents()) > 1
1387 merge = len(wctx.parents()) > 1
1388
1388
1389 if not force and merge and match.ispartial():
1389 if not force and merge and match.ispartial():
1390 raise util.Abort(_('cannot partially commit a merge '
1390 raise util.Abort(_('cannot partially commit a merge '
1391 '(do not specify files or patterns)'))
1391 '(do not specify files or patterns)'))
1392
1392
1393 status = self.status(match=match, clean=force)
1393 status = self.status(match=match, clean=force)
1394 if force:
1394 if force:
1395 status.modified.extend(status.clean) # mq may commit clean files
1395 status.modified.extend(status.clean) # mq may commit clean files
1396
1396
1397 # check subrepos
1397 # check subrepos
1398 subs = []
1398 subs = []
1399 commitsubs = set()
1399 commitsubs = set()
1400 newstate = wctx.substate.copy()
1400 newstate = wctx.substate.copy()
1401 # only manage subrepos and .hgsubstate if .hgsub is present
1401 # only manage subrepos and .hgsubstate if .hgsub is present
1402 if '.hgsub' in wctx:
1402 if '.hgsub' in wctx:
1403 # we'll decide whether to track this ourselves, thanks
1403 # we'll decide whether to track this ourselves, thanks
1404 for c in status.modified, status.added, status.removed:
1404 for c in status.modified, status.added, status.removed:
1405 if '.hgsubstate' in c:
1405 if '.hgsubstate' in c:
1406 c.remove('.hgsubstate')
1406 c.remove('.hgsubstate')
1407
1407
1408 # compare current state to last committed state
1408 # compare current state to last committed state
1409 # build new substate based on last committed state
1409 # build new substate based on last committed state
1410 oldstate = wctx.p1().substate
1410 oldstate = wctx.p1().substate
1411 for s in sorted(newstate.keys()):
1411 for s in sorted(newstate.keys()):
1412 if not match(s):
1412 if not match(s):
1413 # ignore working copy, use old state if present
1413 # ignore working copy, use old state if present
1414 if s in oldstate:
1414 if s in oldstate:
1415 newstate[s] = oldstate[s]
1415 newstate[s] = oldstate[s]
1416 continue
1416 continue
1417 if not force:
1417 if not force:
1418 raise util.Abort(
1418 raise util.Abort(
1419 _("commit with new subrepo %s excluded") % s)
1419 _("commit with new subrepo %s excluded") % s)
1420 dirtyreason = wctx.sub(s).dirtyreason(True)
1420 dirtyreason = wctx.sub(s).dirtyreason(True)
1421 if dirtyreason:
1421 if dirtyreason:
1422 if not self.ui.configbool('ui', 'commitsubrepos'):
1422 if not self.ui.configbool('ui', 'commitsubrepos'):
1423 raise util.Abort(dirtyreason,
1423 raise util.Abort(dirtyreason,
1424 hint=_("use --subrepos for recursive commit"))
1424 hint=_("use --subrepos for recursive commit"))
1425 subs.append(s)
1425 subs.append(s)
1426 commitsubs.add(s)
1426 commitsubs.add(s)
1427 else:
1427 else:
1428 bs = wctx.sub(s).basestate()
1428 bs = wctx.sub(s).basestate()
1429 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1429 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1430 if oldstate.get(s, (None, None, None))[1] != bs:
1430 if oldstate.get(s, (None, None, None))[1] != bs:
1431 subs.append(s)
1431 subs.append(s)
1432
1432
1433 # check for removed subrepos
1433 # check for removed subrepos
1434 for p in wctx.parents():
1434 for p in wctx.parents():
1435 r = [s for s in p.substate if s not in newstate]
1435 r = [s for s in p.substate if s not in newstate]
1436 subs += [s for s in r if match(s)]
1436 subs += [s for s in r if match(s)]
1437 if subs:
1437 if subs:
1438 if (not match('.hgsub') and
1438 if (not match('.hgsub') and
1439 '.hgsub' in (wctx.modified() + wctx.added())):
1439 '.hgsub' in (wctx.modified() + wctx.added())):
1440 raise util.Abort(
1440 raise util.Abort(
1441 _("can't commit subrepos without .hgsub"))
1441 _("can't commit subrepos without .hgsub"))
1442 status.modified.insert(0, '.hgsubstate')
1442 status.modified.insert(0, '.hgsubstate')
1443
1443
1444 elif '.hgsub' in status.removed:
1444 elif '.hgsub' in status.removed:
1445 # clean up .hgsubstate when .hgsub is removed
1445 # clean up .hgsubstate when .hgsub is removed
1446 if ('.hgsubstate' in wctx and
1446 if ('.hgsubstate' in wctx and
1447 '.hgsubstate' not in (status.modified + status.added +
1447 '.hgsubstate' not in (status.modified + status.added +
1448 status.removed)):
1448 status.removed)):
1449 status.removed.insert(0, '.hgsubstate')
1449 status.removed.insert(0, '.hgsubstate')
1450
1450
1451 # make sure all explicit patterns are matched
1451 # make sure all explicit patterns are matched
1452 if not force and (match.isexact() or match.prefix()):
1452 if not force and (match.isexact() or match.prefix()):
1453 matched = set(status.modified + status.added + status.removed)
1453 matched = set(status.modified + status.added + status.removed)
1454
1454
1455 for f in match.files():
1455 for f in match.files():
1456 f = self.dirstate.normalize(f)
1456 f = self.dirstate.normalize(f)
1457 if f == '.' or f in matched or f in wctx.substate:
1457 if f == '.' or f in matched or f in wctx.substate:
1458 continue
1458 continue
1459 if f in status.deleted:
1459 if f in status.deleted:
1460 fail(f, _('file not found!'))
1460 fail(f, _('file not found!'))
1461 if f in vdirs: # visited directory
1461 if f in vdirs: # visited directory
1462 d = f + '/'
1462 d = f + '/'
1463 for mf in matched:
1463 for mf in matched:
1464 if mf.startswith(d):
1464 if mf.startswith(d):
1465 break
1465 break
1466 else:
1466 else:
1467 fail(f, _("no match under directory!"))
1467 fail(f, _("no match under directory!"))
1468 elif f not in self.dirstate:
1468 elif f not in self.dirstate:
1469 fail(f, _("file not tracked!"))
1469 fail(f, _("file not tracked!"))
1470
1470
1471 cctx = context.workingcommitctx(self, status,
1471 cctx = context.workingcommitctx(self, status,
1472 text, user, date, extra)
1472 text, user, date, extra)
1473
1473
1474 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1474 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1475 or extra.get('close') or merge or cctx.files()
1475 or extra.get('close') or merge or cctx.files()
1476 or self.ui.configbool('ui', 'allowemptycommit'))
1476 or self.ui.configbool('ui', 'allowemptycommit'))
1477 if not allowemptycommit:
1477 if not allowemptycommit:
1478 return None
1478 return None
1479
1479
1480 if merge and cctx.deleted():
1480 if merge and cctx.deleted():
1481 raise util.Abort(_("cannot commit merge with missing files"))
1481 raise util.Abort(_("cannot commit merge with missing files"))
1482
1482
1483 ms = mergemod.mergestate(self)
1483 ms = mergemod.mergestate(self)
1484 for f in status.modified:
1484 for f in status.modified:
1485 if f in ms and ms[f] == 'u':
1485 if f in ms and ms[f] == 'u':
1486 raise util.Abort(_('unresolved merge conflicts '
1486 raise util.Abort(_('unresolved merge conflicts '
1487 '(see "hg help resolve")'))
1487 '(see "hg help resolve")'))
1488
1488
1489 if editor:
1489 if editor:
1490 cctx._text = editor(self, cctx, subs)
1490 cctx._text = editor(self, cctx, subs)
1491 edited = (text != cctx._text)
1491 edited = (text != cctx._text)
1492
1492
1493 # Save commit message in case this transaction gets rolled back
1493 # Save commit message in case this transaction gets rolled back
1494 # (e.g. by a pretxncommit hook). Leave the content alone on
1494 # (e.g. by a pretxncommit hook). Leave the content alone on
1495 # the assumption that the user will use the same editor again.
1495 # the assumption that the user will use the same editor again.
1496 msgfn = self.savecommitmessage(cctx._text)
1496 msgfn = self.savecommitmessage(cctx._text)
1497
1497
1498 # commit subs and write new state
1498 # commit subs and write new state
1499 if subs:
1499 if subs:
1500 for s in sorted(commitsubs):
1500 for s in sorted(commitsubs):
1501 sub = wctx.sub(s)
1501 sub = wctx.sub(s)
1502 self.ui.status(_('committing subrepository %s\n') %
1502 self.ui.status(_('committing subrepository %s\n') %
1503 subrepo.subrelpath(sub))
1503 subrepo.subrelpath(sub))
1504 sr = sub.commit(cctx._text, user, date)
1504 sr = sub.commit(cctx._text, user, date)
1505 newstate[s] = (newstate[s][0], sr)
1505 newstate[s] = (newstate[s][0], sr)
1506 subrepo.writestate(self, newstate)
1506 subrepo.writestate(self, newstate)
1507
1507
1508 p1, p2 = self.dirstate.parents()
1508 p1, p2 = self.dirstate.parents()
1509 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1509 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1510 try:
1510 try:
1511 self.hook("precommit", throw=True, parent1=hookp1,
1511 self.hook("precommit", throw=True, parent1=hookp1,
1512 parent2=hookp2)
1512 parent2=hookp2)
1513 ret = self.commitctx(cctx, True)
1513 ret = self.commitctx(cctx, True)
1514 except: # re-raises
1514 except: # re-raises
1515 if edited:
1515 if edited:
1516 self.ui.write(
1516 self.ui.write(
1517 _('note: commit message saved in %s\n') % msgfn)
1517 _('note: commit message saved in %s\n') % msgfn)
1518 raise
1518 raise
1519
1519
1520 # update bookmarks, dirstate and mergestate
1520 # update bookmarks, dirstate and mergestate
1521 bookmarks.update(self, [p1, p2], ret)
1521 bookmarks.update(self, [p1, p2], ret)
1522 cctx.markcommitted(ret)
1522 cctx.markcommitted(ret)
1523 ms.reset()
1523 ms.reset()
1524 finally:
1524 finally:
1525 wlock.release()
1525 wlock.release()
1526
1526
1527 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1527 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1528 # hack for command that use a temporary commit (eg: histedit)
1528 # hack for command that use a temporary commit (eg: histedit)
1529 # temporary commit got stripped before hook release
1529 # temporary commit got stripped before hook release
1530 if self.changelog.hasnode(ret):
1530 if self.changelog.hasnode(ret):
1531 self.hook("commit", node=node, parent1=parent1,
1531 self.hook("commit", node=node, parent1=parent1,
1532 parent2=parent2)
1532 parent2=parent2)
1533 self._afterlock(commithook)
1533 self._afterlock(commithook)
1534 return ret
1534 return ret
1535
1535
1536 @unfilteredmethod
1536 @unfilteredmethod
1537 def commitctx(self, ctx, error=False):
1537 def commitctx(self, ctx, error=False):
1538 """Add a new revision to current repository.
1538 """Add a new revision to current repository.
1539 Revision information is passed via the context argument.
1539 Revision information is passed via the context argument.
1540 """
1540 """
1541
1541
1542 tr = None
1542 tr = None
1543 p1, p2 = ctx.p1(), ctx.p2()
1543 p1, p2 = ctx.p1(), ctx.p2()
1544 user = ctx.user()
1544 user = ctx.user()
1545
1545
1546 lock = self.lock()
1546 lock = self.lock()
1547 try:
1547 try:
1548 tr = self.transaction("commit")
1548 tr = self.transaction("commit")
1549 trp = weakref.proxy(tr)
1549 trp = weakref.proxy(tr)
1550
1550
1551 if ctx.files():
1551 if ctx.files():
1552 m1 = p1.manifest()
1552 m1 = p1.manifest()
1553 m2 = p2.manifest()
1553 m2 = p2.manifest()
1554 m = m1.copy()
1554 m = m1.copy()
1555
1555
1556 # check in files
1556 # check in files
1557 added = []
1557 added = []
1558 changed = []
1558 changed = []
1559 removed = list(ctx.removed())
1559 removed = list(ctx.removed())
1560 linkrev = len(self)
1560 linkrev = len(self)
1561 self.ui.note(_("committing files:\n"))
1561 self.ui.note(_("committing files:\n"))
1562 for f in sorted(ctx.modified() + ctx.added()):
1562 for f in sorted(ctx.modified() + ctx.added()):
1563 self.ui.note(f + "\n")
1563 self.ui.note(f + "\n")
1564 try:
1564 try:
1565 fctx = ctx[f]
1565 fctx = ctx[f]
1566 if fctx is None:
1566 if fctx is None:
1567 removed.append(f)
1567 removed.append(f)
1568 else:
1568 else:
1569 added.append(f)
1569 added.append(f)
1570 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1570 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1571 trp, changed)
1571 trp, changed)
1572 m.setflag(f, fctx.flags())
1572 m.setflag(f, fctx.flags())
1573 except OSError, inst:
1573 except OSError, inst:
1574 self.ui.warn(_("trouble committing %s!\n") % f)
1574 self.ui.warn(_("trouble committing %s!\n") % f)
1575 raise
1575 raise
1576 except IOError, inst:
1576 except IOError, inst:
1577 errcode = getattr(inst, 'errno', errno.ENOENT)
1577 errcode = getattr(inst, 'errno', errno.ENOENT)
1578 if error or errcode and errcode != errno.ENOENT:
1578 if error or errcode and errcode != errno.ENOENT:
1579 self.ui.warn(_("trouble committing %s!\n") % f)
1579 self.ui.warn(_("trouble committing %s!\n") % f)
1580 raise
1580 raise
1581
1581
1582 # update manifest
1582 # update manifest
1583 self.ui.note(_("committing manifest\n"))
1583 self.ui.note(_("committing manifest\n"))
1584 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1584 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1585 drop = [f for f in removed if f in m]
1585 drop = [f for f in removed if f in m]
1586 for f in drop:
1586 for f in drop:
1587 del m[f]
1587 del m[f]
1588 mn = self.manifest.add(m, trp, linkrev,
1588 mn = self.manifest.add(m, trp, linkrev,
1589 p1.manifestnode(), p2.manifestnode(),
1589 p1.manifestnode(), p2.manifestnode(),
1590 added, drop)
1590 added, drop)
1591 files = changed + removed
1591 files = changed + removed
1592 else:
1592 else:
1593 mn = p1.manifestnode()
1593 mn = p1.manifestnode()
1594 files = []
1594 files = []
1595
1595
1596 # update changelog
1596 # update changelog
1597 self.ui.note(_("committing changelog\n"))
1597 self.ui.note(_("committing changelog\n"))
1598 self.changelog.delayupdate(tr)
1598 self.changelog.delayupdate(tr)
1599 n = self.changelog.add(mn, files, ctx.description(),
1599 n = self.changelog.add(mn, files, ctx.description(),
1600 trp, p1.node(), p2.node(),
1600 trp, p1.node(), p2.node(),
1601 user, ctx.date(), ctx.extra().copy())
1601 user, ctx.date(), ctx.extra().copy())
1602 p = lambda: tr.writepending() and self.root or ""
1602 p = lambda: tr.writepending() and self.root or ""
1603 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1603 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1604 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1604 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1605 parent2=xp2, pending=p)
1605 parent2=xp2, pending=p)
1606 # set the new commit is proper phase
1606 # set the new commit is proper phase
1607 targetphase = subrepo.newcommitphase(self.ui, ctx)
1607 targetphase = subrepo.newcommitphase(self.ui, ctx)
1608 if targetphase:
1608 if targetphase:
1609 # retract boundary do not alter parent changeset.
1609 # retract boundary do not alter parent changeset.
1610 # if a parent have higher the resulting phase will
1610 # if a parent have higher the resulting phase will
1611 # be compliant anyway
1611 # be compliant anyway
1612 #
1612 #
1613 # if minimal phase was 0 we don't need to retract anything
1613 # if minimal phase was 0 we don't need to retract anything
1614 phases.retractboundary(self, tr, targetphase, [n])
1614 phases.retractboundary(self, tr, targetphase, [n])
1615 tr.close()
1615 tr.close()
1616 branchmap.updatecache(self.filtered('served'))
1616 branchmap.updatecache(self.filtered('served'))
1617 return n
1617 return n
1618 finally:
1618 finally:
1619 if tr:
1619 if tr:
1620 tr.release()
1620 tr.release()
1621 lock.release()
1621 lock.release()
1622
1622
1623 @unfilteredmethod
1623 @unfilteredmethod
1624 def destroying(self):
1624 def destroying(self):
1625 '''Inform the repository that nodes are about to be destroyed.
1625 '''Inform the repository that nodes are about to be destroyed.
1626 Intended for use by strip and rollback, so there's a common
1626 Intended for use by strip and rollback, so there's a common
1627 place for anything that has to be done before destroying history.
1627 place for anything that has to be done before destroying history.
1628
1628
1629 This is mostly useful for saving state that is in memory and waiting
1629 This is mostly useful for saving state that is in memory and waiting
1630 to be flushed when the current lock is released. Because a call to
1630 to be flushed when the current lock is released. Because a call to
1631 destroyed is imminent, the repo will be invalidated causing those
1631 destroyed is imminent, the repo will be invalidated causing those
1632 changes to stay in memory (waiting for the next unlock), or vanish
1632 changes to stay in memory (waiting for the next unlock), or vanish
1633 completely.
1633 completely.
1634 '''
1634 '''
1635 # When using the same lock to commit and strip, the phasecache is left
1635 # When using the same lock to commit and strip, the phasecache is left
1636 # dirty after committing. Then when we strip, the repo is invalidated,
1636 # dirty after committing. Then when we strip, the repo is invalidated,
1637 # causing those changes to disappear.
1637 # causing those changes to disappear.
1638 if '_phasecache' in vars(self):
1638 if '_phasecache' in vars(self):
1639 self._phasecache.write()
1639 self._phasecache.write()
1640
1640
1641 @unfilteredmethod
1641 @unfilteredmethod
1642 def destroyed(self):
1642 def destroyed(self):
1643 '''Inform the repository that nodes have been destroyed.
1643 '''Inform the repository that nodes have been destroyed.
1644 Intended for use by strip and rollback, so there's a common
1644 Intended for use by strip and rollback, so there's a common
1645 place for anything that has to be done after destroying history.
1645 place for anything that has to be done after destroying history.
1646 '''
1646 '''
1647 # When one tries to:
1647 # When one tries to:
1648 # 1) destroy nodes thus calling this method (e.g. strip)
1648 # 1) destroy nodes thus calling this method (e.g. strip)
1649 # 2) use phasecache somewhere (e.g. commit)
1649 # 2) use phasecache somewhere (e.g. commit)
1650 #
1650 #
1651 # then 2) will fail because the phasecache contains nodes that were
1651 # then 2) will fail because the phasecache contains nodes that were
1652 # removed. We can either remove phasecache from the filecache,
1652 # removed. We can either remove phasecache from the filecache,
1653 # causing it to reload next time it is accessed, or simply filter
1653 # causing it to reload next time it is accessed, or simply filter
1654 # the removed nodes now and write the updated cache.
1654 # the removed nodes now and write the updated cache.
1655 self._phasecache.filterunknown(self)
1655 self._phasecache.filterunknown(self)
1656 self._phasecache.write()
1656 self._phasecache.write()
1657
1657
1658 # update the 'served' branch cache to help read only server process
1658 # update the 'served' branch cache to help read only server process
1659 # Thanks to branchcache collaboration this is done from the nearest
1659 # Thanks to branchcache collaboration this is done from the nearest
1660 # filtered subset and it is expected to be fast.
1660 # filtered subset and it is expected to be fast.
1661 branchmap.updatecache(self.filtered('served'))
1661 branchmap.updatecache(self.filtered('served'))
1662
1662
1663 # Ensure the persistent tag cache is updated. Doing it now
1663 # Ensure the persistent tag cache is updated. Doing it now
1664 # means that the tag cache only has to worry about destroyed
1664 # means that the tag cache only has to worry about destroyed
1665 # heads immediately after a strip/rollback. That in turn
1665 # heads immediately after a strip/rollback. That in turn
1666 # guarantees that "cachetip == currenttip" (comparing both rev
1666 # guarantees that "cachetip == currenttip" (comparing both rev
1667 # and node) always means no nodes have been added or destroyed.
1667 # and node) always means no nodes have been added or destroyed.
1668
1668
1669 # XXX this is suboptimal when qrefresh'ing: we strip the current
1669 # XXX this is suboptimal when qrefresh'ing: we strip the current
1670 # head, refresh the tag cache, then immediately add a new head.
1670 # head, refresh the tag cache, then immediately add a new head.
1671 # But I think doing it this way is necessary for the "instant
1671 # But I think doing it this way is necessary for the "instant
1672 # tag cache retrieval" case to work.
1672 # tag cache retrieval" case to work.
1673 self.invalidate()
1673 self.invalidate()
1674
1674
1675 def walk(self, match, node=None):
1675 def walk(self, match, node=None):
1676 '''
1676 '''
1677 walk recursively through the directory tree or a given
1677 walk recursively through the directory tree or a given
1678 changeset, finding all files matched by the match
1678 changeset, finding all files matched by the match
1679 function
1679 function
1680 '''
1680 '''
1681 return self[node].walk(match)
1681 return self[node].walk(match)
1682
1682
1683 def status(self, node1='.', node2=None, match=None,
1683 def status(self, node1='.', node2=None, match=None,
1684 ignored=False, clean=False, unknown=False,
1684 ignored=False, clean=False, unknown=False,
1685 listsubrepos=False):
1685 listsubrepos=False):
1686 '''a convenience method that calls node1.status(node2)'''
1686 '''a convenience method that calls node1.status(node2)'''
1687 return self[node1].status(node2, match, ignored, clean, unknown,
1687 return self[node1].status(node2, match, ignored, clean, unknown,
1688 listsubrepos)
1688 listsubrepos)
1689
1689
1690 def heads(self, start=None):
1690 def heads(self, start=None):
1691 heads = self.changelog.heads(start)
1691 heads = self.changelog.heads(start)
1692 # sort the output in rev descending order
1692 # sort the output in rev descending order
1693 return sorted(heads, key=self.changelog.rev, reverse=True)
1693 return sorted(heads, key=self.changelog.rev, reverse=True)
1694
1694
1695 def branchheads(self, branch=None, start=None, closed=False):
1695 def branchheads(self, branch=None, start=None, closed=False):
1696 '''return a (possibly filtered) list of heads for the given branch
1696 '''return a (possibly filtered) list of heads for the given branch
1697
1697
1698 Heads are returned in topological order, from newest to oldest.
1698 Heads are returned in topological order, from newest to oldest.
1699 If branch is None, use the dirstate branch.
1699 If branch is None, use the dirstate branch.
1700 If start is not None, return only heads reachable from start.
1700 If start is not None, return only heads reachable from start.
1701 If closed is True, return heads that are marked as closed as well.
1701 If closed is True, return heads that are marked as closed as well.
1702 '''
1702 '''
1703 if branch is None:
1703 if branch is None:
1704 branch = self[None].branch()
1704 branch = self[None].branch()
1705 branches = self.branchmap()
1705 branches = self.branchmap()
1706 if branch not in branches:
1706 if branch not in branches:
1707 return []
1707 return []
1708 # the cache returns heads ordered lowest to highest
1708 # the cache returns heads ordered lowest to highest
1709 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1709 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1710 if start is not None:
1710 if start is not None:
1711 # filter out the heads that cannot be reached from startrev
1711 # filter out the heads that cannot be reached from startrev
1712 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1712 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1713 bheads = [h for h in bheads if h in fbheads]
1713 bheads = [h for h in bheads if h in fbheads]
1714 return bheads
1714 return bheads
1715
1715
1716 def branches(self, nodes):
1716 def branches(self, nodes):
1717 if not nodes:
1717 if not nodes:
1718 nodes = [self.changelog.tip()]
1718 nodes = [self.changelog.tip()]
1719 b = []
1719 b = []
1720 for n in nodes:
1720 for n in nodes:
1721 t = n
1721 t = n
1722 while True:
1722 while True:
1723 p = self.changelog.parents(n)
1723 p = self.changelog.parents(n)
1724 if p[1] != nullid or p[0] == nullid:
1724 if p[1] != nullid or p[0] == nullid:
1725 b.append((t, n, p[0], p[1]))
1725 b.append((t, n, p[0], p[1]))
1726 break
1726 break
1727 n = p[0]
1727 n = p[0]
1728 return b
1728 return b
1729
1729
1730 def between(self, pairs):
1730 def between(self, pairs):
1731 r = []
1731 r = []
1732
1732
1733 for top, bottom in pairs:
1733 for top, bottom in pairs:
1734 n, l, i = top, [], 0
1734 n, l, i = top, [], 0
1735 f = 1
1735 f = 1
1736
1736
1737 while n != bottom and n != nullid:
1737 while n != bottom and n != nullid:
1738 p = self.changelog.parents(n)[0]
1738 p = self.changelog.parents(n)[0]
1739 if i == f:
1739 if i == f:
1740 l.append(n)
1740 l.append(n)
1741 f = f * 2
1741 f = f * 2
1742 n = p
1742 n = p
1743 i += 1
1743 i += 1
1744
1744
1745 r.append(l)
1745 r.append(l)
1746
1746
1747 return r
1747 return r
1748
1748
1749 def checkpush(self, pushop):
1749 def checkpush(self, pushop):
1750 """Extensions can override this function if additional checks have
1750 """Extensions can override this function if additional checks have
1751 to be performed before pushing, or call it if they override push
1751 to be performed before pushing, or call it if they override push
1752 command.
1752 command.
1753 """
1753 """
1754 pass
1754 pass
1755
1755
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        # a fresh, empty hook container; cached on the unfiltered repo so
        # extensions registering hooks see a single shared instance
        return util.hooks()
1762
1762
1763 def stream_in(self, remote, remotereqs):
1763 def stream_in(self, remote, remotereqs):
1764 # Save remote branchmap. We will use it later
1764 # Save remote branchmap. We will use it later
1765 # to speed up branchcache creation
1765 # to speed up branchcache creation
1766 rbranchmap = None
1766 rbranchmap = None
1767 if remote.capable("branchmap"):
1767 if remote.capable("branchmap"):
1768 rbranchmap = remote.branchmap()
1768 rbranchmap = remote.branchmap()
1769
1769
1770 fp = remote.stream_out()
1770 fp = remote.stream_out()
1771 l = fp.readline()
1771 l = fp.readline()
1772 try:
1772 try:
1773 resp = int(l)
1773 resp = int(l)
1774 except ValueError:
1774 except ValueError:
1775 raise error.ResponseError(
1775 raise error.ResponseError(
1776 _('unexpected response from remote server:'), l)
1776 _('unexpected response from remote server:'), l)
1777 if resp == 1:
1777 if resp == 1:
1778 raise util.Abort(_('operation forbidden by server'))
1778 raise util.Abort(_('operation forbidden by server'))
1779 elif resp == 2:
1779 elif resp == 2:
1780 raise util.Abort(_('locking the remote repository failed'))
1780 raise util.Abort(_('locking the remote repository failed'))
1781 elif resp != 0:
1781 elif resp != 0:
1782 raise util.Abort(_('the server sent an unknown error code'))
1782 raise util.Abort(_('the server sent an unknown error code'))
1783
1783
1784 self.applystreamclone(remotereqs, rbranchmap, fp)
1784 self.applystreamclone(remotereqs, rbranchmap, fp)
1785 return len(self.heads()) + 1
1785 return len(self.heads()) + 1
1786
1786
    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                # collect all remote heads and remember which ones are
                # closed, so we can seed a local branchcache from them
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    # tip of the cache = highest remote head revision
                    rtiprev = max((int(self.changelog.rev(node))
                            for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            # the on-disk store was rewritten wholesale; drop in-memory state
            self.invalidate()
        finally:
            lock.release()
1837
1837
1838 def clone(self, remote, heads=[], stream=None):
1838 def clone(self, remote, heads=[], stream=None):
1839 '''clone remote repository.
1839 '''clone remote repository.
1840
1840
1841 keyword arguments:
1841 keyword arguments:
1842 heads: list of revs to clone (forces use of pull)
1842 heads: list of revs to clone (forces use of pull)
1843 stream: use streaming clone if possible'''
1843 stream: use streaming clone if possible'''
1844
1844
1845 # now, all clients that can request uncompressed clones can
1845 # now, all clients that can request uncompressed clones can
1846 # read repo formats supported by all servers that can serve
1846 # read repo formats supported by all servers that can serve
1847 # them.
1847 # them.
1848
1848
1849 # if revlog format changes, client will have to check version
1849 # if revlog format changes, client will have to check version
1850 # and format flags on "stream" capability, and use
1850 # and format flags on "stream" capability, and use
1851 # uncompressed only if compatible.
1851 # uncompressed only if compatible.
1852
1852
1853 if stream is None:
1853 if stream is None:
1854 # if the server explicitly prefers to stream (for fast LANs)
1854 # if the server explicitly prefers to stream (for fast LANs)
1855 stream = remote.capable('stream-preferred')
1855 stream = remote.capable('stream-preferred')
1856
1856
1857 if stream and not heads:
1857 if stream and not heads:
1858 # 'stream' means remote revlog format is revlogv1 only
1858 # 'stream' means remote revlog format is revlogv1 only
1859 if remote.capable('stream'):
1859 if remote.capable('stream'):
1860 self.stream_in(remote, set(('revlogv1',)))
1860 self.stream_in(remote, set(('revlogv1',)))
1861 else:
1861 else:
1862 # otherwise, 'streamreqs' contains the remote revlog format
1862 # otherwise, 'streamreqs' contains the remote revlog format
1863 streamreqs = remote.capable('streamreqs')
1863 streamreqs = remote.capable('streamreqs')
1864 if streamreqs:
1864 if streamreqs:
1865 streamreqs = set(streamreqs.split(','))
1865 streamreqs = set(streamreqs.split(','))
1866 # if we support it, stream in and adjust our requirements
1866 # if we support it, stream in and adjust our requirements
1867 if not streamreqs - self.supportedformats:
1867 if not streamreqs - self.supportedformats:
1868 self.stream_in(remote, streamreqs)
1868 self.stream_in(remote, streamreqs)
1869
1869
1870 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1870 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1871 try:
1871 try:
1872 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1872 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1873 ret = exchange.pull(self, remote, heads).cgresult
1873 ret = exchange.pull(self, remote, heads).cgresult
1874 finally:
1874 finally:
1875 self.ui.restoreconfig(quiet)
1875 self.ui.restoreconfig(quiet)
1876 return ret
1876 return ret
1877
1877
1878 def pushkey(self, namespace, key, old, new):
1878 def pushkey(self, namespace, key, old, new):
1879 try:
1879 try:
1880 tr = self.currenttransaction()
1880 tr = self.currenttransaction()
1881 hookargs = {}
1881 hookargs = {}
1882 if tr is not None:
1882 if tr is not None:
1883 hookargs.update(tr.hookargs)
1883 hookargs.update(tr.hookargs)
1884 pending = lambda: tr.writepending() and self.root or ""
1884 pending = lambda: tr.writepending() and self.root or ""
1885 hookargs['pending'] = pending
1885 hookargs['pending'] = pending
1886 hookargs['namespace'] = namespace
1886 hookargs['namespace'] = namespace
1887 hookargs['key'] = key
1887 hookargs['key'] = key
1888 hookargs['old'] = old
1888 hookargs['old'] = old
1889 hookargs['new'] = new
1889 hookargs['new'] = new
1890 self.hook('prepushkey', throw=True, **hookargs)
1890 self.hook('prepushkey', throw=True, **hookargs)
1891 except error.HookAbort, exc:
1891 except error.HookAbort, exc:
1892 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1892 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1893 if exc.hint:
1893 if exc.hint:
1894 self.ui.write_err(_("(%s)\n") % exc.hint)
1894 self.ui.write_err(_("(%s)\n") % exc.hint)
1895 return False
1895 return False
1896 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1896 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1897 ret = pushkey.push(self, namespace, key, old, new)
1897 ret = pushkey.push(self, namespace, key, old, new)
1898 def runhook():
1898 def runhook():
1899 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1899 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1900 ret=ret)
1900 ret=ret)
1901 self._afterlock(runhook)
1901 self._afterlock(runhook)
1902 return ret
1902 return ret
1903
1903
1904 def listkeys(self, namespace):
1904 def listkeys(self, namespace):
1905 self.hook('prelistkeys', throw=True, namespace=namespace)
1905 self.hook('prelistkeys', throw=True, namespace=namespace)
1906 self.ui.debug('listing keys for "%s"\n' % namespace)
1906 self.ui.debug('listing keys for "%s"\n' % namespace)
1907 values = pushkey.list(self, namespace)
1907 values = pushkey.list(self, namespace)
1908 self.hook('listkeys', namespace=namespace, values=values)
1908 self.hook('listkeys', namespace=namespace, values=values)
1909 return values
1909 return values
1910
1910
1911 def debugwireargs(self, one, two, three=None, four=None, five=None):
1911 def debugwireargs(self, one, two, three=None, four=None, five=None):
1912 '''used to test argument passing over the wire'''
1912 '''used to test argument passing over the wire'''
1913 return "%s %s %s %s %s" % (one, two, three, four, five)
1913 return "%s %s %s %s %s" % (one, two, three, four, five)
1914
1914
1915 def savecommitmessage(self, text):
1915 def savecommitmessage(self, text):
1916 fp = self.vfs('last-message.txt', 'wb')
1916 fp = self.vfs('last-message.txt', 'wb')
1917 try:
1917 try:
1918 fp.write(text)
1918 fp.write(text)
1919 finally:
1919 finally:
1920 fp.close()
1920 fp.close()
1921 return self.pathto(fp.name[len(self.root) + 1:])
1921 return self.pathto(fp.name[len(self.root) + 1:])
1922
1922
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (vfs, src, dest) triple in *files*.

    An OSError from a rename (journal file does not yet exist) is
    silently ignored.
    """
    renamefiles = [tuple(t) for t in files]
    def renamer():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renamer
1933
1933
def undoname(fn):
    """Map a 'journal*' file path to the corresponding 'undo*' path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1938
1938
def instance(ui, path, create):
    """Instantiate a localrepository for *path* (URL or local path)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1941
1941
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,1166 +1,1156
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat, inspect
13 import os, errno, re, glob, tempfile, shutil, stat
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
class status(tuple):
    '''Immutable 7-tuple of per-status file lists.

    The 'deleted', 'unknown' and 'ignored' properties are only
    meaningful for the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # the tuple itself supplies all seven %r slots
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # items() instead of the py2-only iteritems(): sorted() consumes the
    # pairs immediately, so the result is identical and stays valid on py3
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100
100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
121
121
def checknewlabel(repo, lbl, kind):
    '''Abort if *lbl* is not usable as a new bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the label parsed as an integer, which would shadow rev numbers
        raise util.Abort(_("cannot use an integer as a name"))
135
135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newlines and carriage returns would corrupt the dirstate/manifest
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140
140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152
152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    asbool = util.parsebool(val)
    # Windows always aborts: non-portable names cannot exist there at all
    abort = os.name == 'nt' or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165
165
class casecollisionauditor(object):
    '''Warn or abort when a new file differs only in case from a file
    already present in the dirstate.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # fold every tracked filename once up front so each audit is a
        # set lookup rather than a dirstate scan
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
189
189
# develwarn() used to live here; it is now a method of the ui object
# (ui.develwarn).  The leftover copy also depended on the removed
# `inspect` import, so it is dropped rather than kept broken.

def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        # unfiltered view: no key needed, callers treat None as "no filter"
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = util.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key
223
213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        # "as" form works on py2.6+ and py3, unlike "except IOError, inst"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cache the bound __call__ so subsequent open() calls skip this shim
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
423
413
424 class vfs(abstractvfs):
414 class vfs(abstractvfs):
425 '''Operate files relative to a base directory
415 '''Operate files relative to a base directory
426
416
427 This class is used to hide the details of COW semantics and
417 This class is used to hide the details of COW semantics and
428 remote file access from higher level code.
418 remote file access from higher level code.
429 '''
419 '''
430 def __init__(self, base, audit=True, expandpath=False, realpath=False):
420 def __init__(self, base, audit=True, expandpath=False, realpath=False):
431 if expandpath:
421 if expandpath:
432 base = util.expandpath(base)
422 base = util.expandpath(base)
433 if realpath:
423 if realpath:
434 base = os.path.realpath(base)
424 base = os.path.realpath(base)
435 self.base = base
425 self.base = base
436 self._setmustaudit(audit)
426 self._setmustaudit(audit)
437 self.createmode = None
427 self.createmode = None
438 self._trustnlink = None
428 self._trustnlink = None
439
429
440 def _getmustaudit(self):
430 def _getmustaudit(self):
441 return self._audit
431 return self._audit
442
432
443 def _setmustaudit(self, onoff):
433 def _setmustaudit(self, onoff):
444 self._audit = onoff
434 self._audit = onoff
445 if onoff:
435 if onoff:
446 self.audit = pathutil.pathauditor(self.base)
436 self.audit = pathutil.pathauditor(self.base)
447 else:
437 else:
448 self.audit = util.always
438 self.audit = util.always
449
439
450 mustaudit = property(_getmustaudit, _setmustaudit)
440 mustaudit = property(_getmustaudit, _setmustaudit)
451
441
452 @util.propertycache
442 @util.propertycache
453 def _cansymlink(self):
443 def _cansymlink(self):
454 return util.checklink(self.base)
444 return util.checklink(self.base)
455
445
456 @util.propertycache
446 @util.propertycache
457 def _chmod(self):
447 def _chmod(self):
458 return util.checkexec(self.base)
448 return util.checkexec(self.base)
459
449
460 def _fixfilemode(self, name):
450 def _fixfilemode(self, name):
461 if self.createmode is None or not self._chmod:
451 if self.createmode is None or not self._chmod:
462 return
452 return
463 os.chmod(name, self.createmode & 0666)
453 os.chmod(name, self.createmode & 0666)
464
454
465 def __call__(self, path, mode="r", text=False, atomictemp=False,
455 def __call__(self, path, mode="r", text=False, atomictemp=False,
466 notindexed=False):
456 notindexed=False):
467 '''Open ``path`` file, which is relative to vfs root.
457 '''Open ``path`` file, which is relative to vfs root.
468
458
469 Newly created directories are marked as "not to be indexed by
459 Newly created directories are marked as "not to be indexed by
470 the content indexing service", if ``notindexed`` is specified
460 the content indexing service", if ``notindexed`` is specified
471 for "write" mode access.
461 for "write" mode access.
472 '''
462 '''
473 if self._audit:
463 if self._audit:
474 r = util.checkosfilename(path)
464 r = util.checkosfilename(path)
475 if r:
465 if r:
476 raise util.Abort("%s: %r" % (r, path))
466 raise util.Abort("%s: %r" % (r, path))
477 self.audit(path)
467 self.audit(path)
478 f = self.join(path)
468 f = self.join(path)
479
469
480 if not text and "b" not in mode:
470 if not text and "b" not in mode:
481 mode += "b" # for that other OS
471 mode += "b" # for that other OS
482
472
483 nlink = -1
473 nlink = -1
484 if mode not in ('r', 'rb'):
474 if mode not in ('r', 'rb'):
485 dirname, basename = util.split(f)
475 dirname, basename = util.split(f)
486 # If basename is empty, then the path is malformed because it points
476 # If basename is empty, then the path is malformed because it points
487 # to a directory. Let the posixfile() call below raise IOError.
477 # to a directory. Let the posixfile() call below raise IOError.
488 if basename:
478 if basename:
489 if atomictemp:
479 if atomictemp:
490 util.ensuredirs(dirname, self.createmode, notindexed)
480 util.ensuredirs(dirname, self.createmode, notindexed)
491 return util.atomictempfile(f, mode, self.createmode)
481 return util.atomictempfile(f, mode, self.createmode)
492 try:
482 try:
493 if 'w' in mode:
483 if 'w' in mode:
494 util.unlink(f)
484 util.unlink(f)
495 nlink = 0
485 nlink = 0
496 else:
486 else:
497 # nlinks() may behave differently for files on Windows
487 # nlinks() may behave differently for files on Windows
498 # shares if the file is open.
488 # shares if the file is open.
499 fd = util.posixfile(f)
489 fd = util.posixfile(f)
500 nlink = util.nlinks(f)
490 nlink = util.nlinks(f)
501 if nlink < 1:
491 if nlink < 1:
502 nlink = 2 # force mktempcopy (issue1922)
492 nlink = 2 # force mktempcopy (issue1922)
503 fd.close()
493 fd.close()
504 except (OSError, IOError), e:
494 except (OSError, IOError), e:
505 if e.errno != errno.ENOENT:
495 if e.errno != errno.ENOENT:
506 raise
496 raise
507 nlink = 0
497 nlink = 0
508 util.ensuredirs(dirname, self.createmode, notindexed)
498 util.ensuredirs(dirname, self.createmode, notindexed)
509 if nlink > 0:
499 if nlink > 0:
510 if self._trustnlink is None:
500 if self._trustnlink is None:
511 self._trustnlink = nlink > 1 or util.checknlink(f)
501 self._trustnlink = nlink > 1 or util.checknlink(f)
512 if nlink > 1 or not self._trustnlink:
502 if nlink > 1 or not self._trustnlink:
513 util.rename(util.mktempcopy(f), f)
503 util.rename(util.mktempcopy(f), f)
514 fp = util.posixfile(f, mode)
504 fp = util.posixfile(f, mode)
515 if nlink == 0:
505 if nlink == 0:
516 self._fixfilemode(f)
506 self._fixfilemode(f)
517 return fp
507 return fp
518
508
def symlink(self, src, dst):
    """Create a symlink at *dst* (relative to this vfs) pointing at *src*.

    Any existing file at the destination is removed first. On platforms
    (or filesystems) without symlink support, fall back to writing *src*
    as the plain content of *dst*.
    """
    self.audit(dst)
    linkname = self.join(dst)
    try:
        os.unlink(linkname)
    except OSError:
        # destination did not exist (or could not be removed); creation
        # below will surface any real problem
        pass

    util.ensuredirs(os.path.dirname(linkname), self.createmode)

    if self._cansymlink:
        try:
            os.symlink(src, linkname)
        # 'except ... as' is valid on Python 2.6+ and required on Python 3,
        # unlike the old comma form
        except OSError as err:
            raise OSError(err.errno, _('could not symlink to %r: %s') %
                          (src, err.strerror), linkname)
    else:
        # no symlink support: store the link target as file content
        self.write(dst, src)
537
527
def join(self, path, *insidef):
    """Return *path* (plus optional sub-path components) under self.base.

    An empty/None *path* yields the base directory itself.
    """
    if not path:
        return self.base
    return os.path.join(self.base, path, *insidef)
543
533
# Backwards-compatible alias: older callers refer to the vfs class as "opener".
opener = vfs
545
535
class auditvfs(object):
    """Base wrapper that delegates the ``mustaudit`` flag to an inner vfs."""

    def __init__(self, vfs):
        # the wrapped vfs every operation is forwarded to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    # expose the inner vfs's flag as a read/write property on the wrapper
    mustaudit = property(_getmustaudit, _setmustaudit)
557
547
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rewrites every filename through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable mapping a logical path to the physical path to use
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))

# Backwards-compatible alias for older callers.
filteropener = filtervfs
575
565
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs that refuses to open any file for writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
586
576
587
577
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return False if it was already seen
            # (prevents symlink cycles)
            st = os.stat(dirname)
            if any(samestat(st, prev) for prev in dirlst):
                return False
            dirlst.append(st)
            return True
    else:
        # without samestat we cannot detect cycles, so never follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink manually, sharing the
                        # seen set to stay cycle-safe
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
635
625
def osrcpath():
    '''Return the default, OS-specific hgrc search path.'''
    path = []
    # bundled default.d snippets come first so that system and user
    # configuration files (appended below) override them
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
648
638
_rcpath = None  # memoized result of rcpath()

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            # empty entries are silently dropped; a fully empty HGRCPATH
            # thus yields an empty search path
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
674
664
def intrev(repo, rev):
    """Return an integer for *rev* usable in comparison or arithmetic.

    The working directory (rev is None) maps to len(repo), i.e. one past
    the largest real revision number.
    """
    return len(repo) if rev is None else rev
681
671
def revsingle(repo, revspec, default='.'):
    """Resolve *revspec* to a single changectx (the last matching revision).

    An empty spec (but not 0, which is a valid revision) falls back to
    *default*.
    """
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs.last()]
690
680
def revpair(repo, revs):
    """Resolve *revs* into a pair of changeset nodes.

    Returns (node, None) when the specs collapse to a single revision,
    otherwise (firstnode, secondnode). With no specs at all, the first
    working-directory parent is returned.
    """
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    # pick the endpoints without forcing a full sort when the smartset
    # already knows its ordering
    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single non-range spec that resolved to one revision yields
    # (node, None) so callers can tell "one rev" from "identical pair"
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
716
706
_revrangesep = ':'  # separator used by old-style "start:end" range specs

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty half of a range spec falls back to its default rev
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                # spanset's end bound is exclusive, in either direction
                if start < end:
                    span = revset.spanset(repo, start, end + 1)
                else:
                    span = revset.spanset(repo, start, end - 1)
                subsets.append(span)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        matcher = revset.match(repo.ui, spec, repo)
        subsets.append(matcher(repo))

    return revset._combinesets(subsets)
773
763
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # only bare (kind-less) patterns are glob-expanded
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                expanded.extend(globbed)
                continue
        # keep the original spec when it has an explicit kind or the
        # glob matched nothing
        expanded.append(kindpat)
    return expanded
792
782
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.

    The matcher will warn about bad matches, unless an alternate badfn
    callback is provided.
    '''
    # None sentinels instead of mutable defaults ([] / {}): a shared
    # default list/dict would be visible across all calls if ever mutated
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # note: closes over m, which is assigned below before any call
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
815
805
def match(ctx, pats=None, opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # None sentinels instead of mutable [] / {} defaults; normalize here
    # so matchandpats always receives concrete containers
    return matchandpats(ctx,
                        pats if pats is not None else [],
                        opts if opts is not None else {},
                        globbed, default, badfn=badfn)[0]
819
809
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
823
813
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
827
817
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recursing into subrepos.

    Returns 1 when any explicitly requested file was rejected or a
    subrepo reported a failure, 0 otherwise.
    '''
    # None sentinel instead of a mutable {} default, which would be a
    # single dict shared by every call
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is involved when named exactly or when any requested
        # file lies under its path
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only complain for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
889
879
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # define the rejection list before the lambda that appends to it
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # report failure when any explicitly named file was rejected
    if any(f in m.files() for f in rejected):
        return 1
    return 0
918
908
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    auditor = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        state = dirstate[abs]
        # classify by dirstate status vs. on-disk presence (st truthy)
        if state == '?' and auditor.check(abs):
            unknown.append(abs)
        elif state != 'r' and not st:
            deleted.append(abs)
        elif state == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif state == 'r' and not st:
            removed.append(abs)
        elif state == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
947
937
948 def _findrenames(repo, matcher, added, removed, similarity):
938 def _findrenames(repo, matcher, added, removed, similarity):
949 '''Find renames from removed files to added ones.'''
939 '''Find renames from removed files to added ones.'''
950 renames = {}
940 renames = {}
951 if similarity > 0:
941 if similarity > 0:
952 for old, new, score in similar.findrenames(repo, added, removed,
942 for old, new, score in similar.findrenames(repo, added, removed,
953 similarity):
943 similarity):
954 if (repo.ui.verbose or not matcher.exact(old)
944 if (repo.ui.verbose or not matcher.exact(old)
955 or not matcher.exact(new)):
945 or not matcher.exact(new)):
956 repo.ui.status(_('recording removal of %s as rename to %s '
946 repo.ui.status(_('recording removal of %s as rename to %s '
957 '(%d%% similar)\n') %
947 '(%d%% similar)\n') %
958 (matcher.rel(old), matcher.rel(new),
948 (matcher.rel(old), matcher.rel(new),
959 score * 100))
949 score * 100))
960 renames[new] = old
950 renames[new] = old
961 return renames
951 return renames
962
952
963 def _markchanges(repo, unknown, deleted, renames):
953 def _markchanges(repo, unknown, deleted, renames):
964 '''Marks the files in unknown as added, the files in deleted as removed,
954 '''Marks the files in unknown as added, the files in deleted as removed,
965 and the files in renames as copied.'''
955 and the files in renames as copied.'''
966 wctx = repo[None]
956 wctx = repo[None]
967 wlock = repo.wlock()
957 wlock = repo.wlock()
968 try:
958 try:
969 wctx.forget(deleted)
959 wctx.forget(deleted)
970 wctx.add(unknown)
960 wctx.add(unknown)
971 for new, old in renames.iteritems():
961 for new, old in renames.iteritems():
972 wctx.copy(old, new)
962 wctx.copy(old, new)
973 finally:
963 finally:
974 wlock.release()
964 wlock.release()
975
965
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    # src itself is uncommitted ('a'dded): warn, there is no copy source
    # revision to record
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
994
984
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missing = []
    for req in requirements:
        if req not in supported:
            # an empty or non-alphanumeric-leading entry means the file
            # itself is damaged, not merely too new
            if not req or not req[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missing.append(req)
    missing.sort()
    if missing:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missing),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1013
1003
def writerequires(opener, requirements):
    """Write *requirements*, sorted and one per line, to .hg/requires."""
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        # don't leak the file handle if a write fails
        reqfile.close()
1019
1009
class filecachesubentry(object):
    """Stat-based change tracker for a single path under .hg/.

    _cacheable is True/False once known, or None while undetermined
    (e.g. the file did not exist when first statted).
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return a cachestat for *path*, or None if it does not exist."""
        try:
            return util.cachestat(path)
        # 'except ... as' is valid on Python 2.6+ and required on Python 3,
        # unlike the old comma form
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1074
1064
class filecacheentry(object):
    """Aggregate change tracker over several paths."""

    def __init__(self, paths, stat=True):
        # one subentry per tracked path
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1091
1081
1092 class filecache(object):
1082 class filecache(object):
1093 '''A property like decorator that tracks files under .hg/ for updates.
1083 '''A property like decorator that tracks files under .hg/ for updates.
1094
1084
1095 Records stat info when called in _filecache.
1085 Records stat info when called in _filecache.
1096
1086
1097 On subsequent calls, compares old stat info with new info, and recreates the
1087 On subsequent calls, compares old stat info with new info, and recreates the
1098 object when any of the files changes, updating the new stat info in
1088 object when any of the files changes, updating the new stat info in
1099 _filecache.
1089 _filecache.
1100
1090
1101 Mercurial either atomic renames or appends for files under .hg,
1091 Mercurial either atomic renames or appends for files under .hg,
1102 so to ensure the cache is reliable we need the filesystem to be able
1092 so to ensure the cache is reliable we need the filesystem to be able
1103 to tell us if a file has been replaced. If it can't, we fallback to
1093 to tell us if a file has been replaced. If it can't, we fallback to
1104 recreating the object on every call (essentially the same behaviour as
1094 recreating the object on every call (essentially the same behaviour as
1105 propertycache).
1095 propertycache).
1106
1096
1107 '''
1097 '''
1108 def __init__(self, *paths):
1098 def __init__(self, *paths):
1109 self.paths = paths
1099 self.paths = paths
1110
1100
1111 def join(self, obj, fname):
1101 def join(self, obj, fname):
1112 """Used to compute the runtime path of a cached file.
1102 """Used to compute the runtime path of a cached file.
1113
1103
1114 Users should subclass filecache and provide their own version of this
1104 Users should subclass filecache and provide their own version of this
1115 function to call the appropriate join function on 'obj' (an instance
1105 function to call the appropriate join function on 'obj' (an instance
1116 of the class that its member function was decorated).
1106 of the class that its member function was decorated).
1117 """
1107 """
1118 return obj.join(fname)
1108 return obj.join(fname)
1119
1109
1120 def __call__(self, func):
1110 def __call__(self, func):
1121 self.func = func
1111 self.func = func
1122 self.name = func.__name__
1112 self.name = func.__name__
1123 return self
1113 return self
1124
1114
1125 def __get__(self, obj, type=None):
1115 def __get__(self, obj, type=None):
1126 # do we need to check if the file changed?
1116 # do we need to check if the file changed?
1127 if self.name in obj.__dict__:
1117 if self.name in obj.__dict__:
1128 assert self.name in obj._filecache, self.name
1118 assert self.name in obj._filecache, self.name
1129 return obj.__dict__[self.name]
1119 return obj.__dict__[self.name]
1130
1120
1131 entry = obj._filecache.get(self.name)
1121 entry = obj._filecache.get(self.name)
1132
1122
1133 if entry:
1123 if entry:
1134 if entry.changed():
1124 if entry.changed():
1135 entry.obj = self.func(obj)
1125 entry.obj = self.func(obj)
1136 else:
1126 else:
1137 paths = [self.join(obj, path) for path in self.paths]
1127 paths = [self.join(obj, path) for path in self.paths]
1138
1128
1139 # We stat -before- creating the object so our cache doesn't lie if
1129 # We stat -before- creating the object so our cache doesn't lie if
1140 # a writer modified between the time we read and stat
1130 # a writer modified between the time we read and stat
1141 entry = filecacheentry(paths, True)
1131 entry = filecacheentry(paths, True)
1142 entry.obj = self.func(obj)
1132 entry.obj = self.func(obj)
1143
1133
1144 obj._filecache[self.name] = entry
1134 obj._filecache[self.name] = entry
1145
1135
1146 obj.__dict__[self.name] = entry.obj
1136 obj.__dict__[self.name] = entry.obj
1147 return entry.obj
1137 return entry.obj
1148
1138
1149 def __set__(self, obj, value):
1139 def __set__(self, obj, value):
1150 if self.name not in obj._filecache:
1140 if self.name not in obj._filecache:
1151 # we add an entry for the missing value because X in __dict__
1141 # we add an entry for the missing value because X in __dict__
1152 # implies X in _filecache
1142 # implies X in _filecache
1153 paths = [self.join(obj, path) for path in self.paths]
1143 paths = [self.join(obj, path) for path in self.paths]
1154 ce = filecacheentry(paths, False)
1144 ce = filecacheentry(paths, False)
1155 obj._filecache[self.name] = ce
1145 obj._filecache[self.name] = ce
1156 else:
1146 else:
1157 ce = obj._filecache[self.name]
1147 ce = obj._filecache[self.name]
1158
1148
1159 ce.obj = value # update cached copy
1149 ce.obj = value # update cached copy
1160 obj.__dict__[self.name] = value # update copy returned by obj.x
1150 obj.__dict__[self.name] = value # update copy returned by obj.x
1161
1151
1162 def __delete__(self, obj):
1152 def __delete__(self, obj):
1163 try:
1153 try:
1164 del obj.__dict__[self.name]
1154 del obj.__dict__[self.name]
1165 except KeyError:
1155 except KeyError:
1166 raise AttributeError(self.name)
1156 raise AttributeError(self.name)
@@ -1,1016 +1,1027
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import inspect
8 from i18n import _
9 from i18n import _
9 import errno, getpass, os, socket, sys, tempfile, traceback
10 import errno, getpass, os, socket, sys, tempfile, traceback
10 import config, scmutil, util, error, formatter, progress
11 import config, scmutil, util, error, formatter, progress
11 from node import hex
12 from node import hex
12
13
13 samplehgrcs = {
14 samplehgrcs = {
14 'user':
15 'user':
15 """# example user config (see "hg help config" for more info)
16 """# example user config (see "hg help config" for more info)
16 [ui]
17 [ui]
17 # name and email, e.g.
18 # name and email, e.g.
18 # username = Jane Doe <jdoe@example.com>
19 # username = Jane Doe <jdoe@example.com>
19 username =
20 username =
20
21
21 [extensions]
22 [extensions]
22 # uncomment these lines to enable some popular extensions
23 # uncomment these lines to enable some popular extensions
23 # (see "hg help extensions" for more info)
24 # (see "hg help extensions" for more info)
24 #
25 #
25 # pager =
26 # pager =
26 # progress =
27 # progress =
27 # color =""",
28 # color =""",
28
29
29 'cloned':
30 'cloned':
30 """# example repository config (see "hg help config" for more info)
31 """# example repository config (see "hg help config" for more info)
31 [paths]
32 [paths]
32 default = %s
33 default = %s
33
34
34 # path aliases to other clones of this repo in URLs or filesystem paths
35 # path aliases to other clones of this repo in URLs or filesystem paths
35 # (see "hg help config.paths" for more info)
36 # (see "hg help config.paths" for more info)
36 #
37 #
37 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
38 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
38 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
39 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
39 # my-clone = /home/jdoe/jdoes-clone
40 # my-clone = /home/jdoe/jdoes-clone
40
41
41 [ui]
42 [ui]
42 # name and email (local to this repository, optional), e.g.
43 # name and email (local to this repository, optional), e.g.
43 # username = Jane Doe <jdoe@example.com>
44 # username = Jane Doe <jdoe@example.com>
44 """,
45 """,
45
46
46 'local':
47 'local':
47 """# example repository config (see "hg help config" for more info)
48 """# example repository config (see "hg help config" for more info)
48 [paths]
49 [paths]
49 # path aliases to other clones of this repo in URLs or filesystem paths
50 # path aliases to other clones of this repo in URLs or filesystem paths
50 # (see "hg help config.paths" for more info)
51 # (see "hg help config.paths" for more info)
51 #
52 #
52 # default = http://example.com/hg/example-repo
53 # default = http://example.com/hg/example-repo
53 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
54 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
54 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
55 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
55 # my-clone = /home/jdoe/jdoes-clone
56 # my-clone = /home/jdoe/jdoes-clone
56
57
57 [ui]
58 [ui]
58 # name and email (local to this repository, optional), e.g.
59 # name and email (local to this repository, optional), e.g.
59 # username = Jane Doe <jdoe@example.com>
60 # username = Jane Doe <jdoe@example.com>
60 """,
61 """,
61
62
62 'global':
63 'global':
63 """# example system-wide hg config (see "hg help config" for more info)
64 """# example system-wide hg config (see "hg help config" for more info)
64
65
65 [extensions]
66 [extensions]
66 # uncomment these lines to enable some popular extensions
67 # uncomment these lines to enable some popular extensions
67 # (see "hg help extensions" for more info)
68 # (see "hg help extensions" for more info)
68 #
69 #
69 # blackbox =
70 # blackbox =
70 # progress =
71 # progress =
71 # color =
72 # color =
72 # pager =""",
73 # pager =""",
73 }
74 }
74
75
75 class ui(object):
76 class ui(object):
76 def __init__(self, src=None):
77 def __init__(self, src=None):
77 # _buffers: used for temporary capture of output
78 # _buffers: used for temporary capture of output
78 self._buffers = []
79 self._buffers = []
79 # _bufferstates:
80 # _bufferstates:
80 # should the temporary capture include stderr and subprocess output
81 # should the temporary capture include stderr and subprocess output
81 self._bufferstates = []
82 self._bufferstates = []
82 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
83 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
83 self._reportuntrusted = True
84 self._reportuntrusted = True
84 self._ocfg = config.config() # overlay
85 self._ocfg = config.config() # overlay
85 self._tcfg = config.config() # trusted
86 self._tcfg = config.config() # trusted
86 self._ucfg = config.config() # untrusted
87 self._ucfg = config.config() # untrusted
87 self._trustusers = set()
88 self._trustusers = set()
88 self._trustgroups = set()
89 self._trustgroups = set()
89 self.callhooks = True
90 self.callhooks = True
90
91
91 if src:
92 if src:
92 self.fout = src.fout
93 self.fout = src.fout
93 self.ferr = src.ferr
94 self.ferr = src.ferr
94 self.fin = src.fin
95 self.fin = src.fin
95
96
96 self._tcfg = src._tcfg.copy()
97 self._tcfg = src._tcfg.copy()
97 self._ucfg = src._ucfg.copy()
98 self._ucfg = src._ucfg.copy()
98 self._ocfg = src._ocfg.copy()
99 self._ocfg = src._ocfg.copy()
99 self._trustusers = src._trustusers.copy()
100 self._trustusers = src._trustusers.copy()
100 self._trustgroups = src._trustgroups.copy()
101 self._trustgroups = src._trustgroups.copy()
101 self.environ = src.environ
102 self.environ = src.environ
102 self.callhooks = src.callhooks
103 self.callhooks = src.callhooks
103 self.fixconfig()
104 self.fixconfig()
104 else:
105 else:
105 self.fout = sys.stdout
106 self.fout = sys.stdout
106 self.ferr = sys.stderr
107 self.ferr = sys.stderr
107 self.fin = sys.stdin
108 self.fin = sys.stdin
108
109
109 # shared read-only environment
110 # shared read-only environment
110 self.environ = os.environ
111 self.environ = os.environ
111 # we always trust global config files
112 # we always trust global config files
112 for f in scmutil.rcpath():
113 for f in scmutil.rcpath():
113 self.readconfig(f, trust=True)
114 self.readconfig(f, trust=True)
114
115
115 def copy(self):
116 def copy(self):
116 return self.__class__(self)
117 return self.__class__(self)
117
118
118 def formatter(self, topic, opts):
119 def formatter(self, topic, opts):
119 return formatter.formatter(self, topic, opts)
120 return formatter.formatter(self, topic, opts)
120
121
121 def _trusted(self, fp, f):
122 def _trusted(self, fp, f):
122 st = util.fstat(fp)
123 st = util.fstat(fp)
123 if util.isowner(st):
124 if util.isowner(st):
124 return True
125 return True
125
126
126 tusers, tgroups = self._trustusers, self._trustgroups
127 tusers, tgroups = self._trustusers, self._trustgroups
127 if '*' in tusers or '*' in tgroups:
128 if '*' in tusers or '*' in tgroups:
128 return True
129 return True
129
130
130 user = util.username(st.st_uid)
131 user = util.username(st.st_uid)
131 group = util.groupname(st.st_gid)
132 group = util.groupname(st.st_gid)
132 if user in tusers or group in tgroups or user == util.username():
133 if user in tusers or group in tgroups or user == util.username():
133 return True
134 return True
134
135
135 if self._reportuntrusted:
136 if self._reportuntrusted:
136 self.warn(_('not trusting file %s from untrusted '
137 self.warn(_('not trusting file %s from untrusted '
137 'user %s, group %s\n') % (f, user, group))
138 'user %s, group %s\n') % (f, user, group))
138 return False
139 return False
139
140
140 def readconfig(self, filename, root=None, trust=False,
141 def readconfig(self, filename, root=None, trust=False,
141 sections=None, remap=None):
142 sections=None, remap=None):
142 try:
143 try:
143 fp = open(filename)
144 fp = open(filename)
144 except IOError:
145 except IOError:
145 if not sections: # ignore unless we were looking for something
146 if not sections: # ignore unless we were looking for something
146 return
147 return
147 raise
148 raise
148
149
149 cfg = config.config()
150 cfg = config.config()
150 trusted = sections or trust or self._trusted(fp, filename)
151 trusted = sections or trust or self._trusted(fp, filename)
151
152
152 try:
153 try:
153 cfg.read(filename, fp, sections=sections, remap=remap)
154 cfg.read(filename, fp, sections=sections, remap=remap)
154 fp.close()
155 fp.close()
155 except error.ConfigError, inst:
156 except error.ConfigError, inst:
156 if trusted:
157 if trusted:
157 raise
158 raise
158 self.warn(_("ignored: %s\n") % str(inst))
159 self.warn(_("ignored: %s\n") % str(inst))
159
160
160 if self.plain():
161 if self.plain():
161 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
162 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
162 'logtemplate', 'statuscopies', 'style',
163 'logtemplate', 'statuscopies', 'style',
163 'traceback', 'verbose'):
164 'traceback', 'verbose'):
164 if k in cfg['ui']:
165 if k in cfg['ui']:
165 del cfg['ui'][k]
166 del cfg['ui'][k]
166 for k, v in cfg.items('defaults'):
167 for k, v in cfg.items('defaults'):
167 del cfg['defaults'][k]
168 del cfg['defaults'][k]
168 # Don't remove aliases from the configuration if in the exceptionlist
169 # Don't remove aliases from the configuration if in the exceptionlist
169 if self.plain('alias'):
170 if self.plain('alias'):
170 for k, v in cfg.items('alias'):
171 for k, v in cfg.items('alias'):
171 del cfg['alias'][k]
172 del cfg['alias'][k]
172 if self.plain('revsetalias'):
173 if self.plain('revsetalias'):
173 for k, v in cfg.items('revsetalias'):
174 for k, v in cfg.items('revsetalias'):
174 del cfg['revsetalias'][k]
175 del cfg['revsetalias'][k]
175
176
176 if trusted:
177 if trusted:
177 self._tcfg.update(cfg)
178 self._tcfg.update(cfg)
178 self._tcfg.update(self._ocfg)
179 self._tcfg.update(self._ocfg)
179 self._ucfg.update(cfg)
180 self._ucfg.update(cfg)
180 self._ucfg.update(self._ocfg)
181 self._ucfg.update(self._ocfg)
181
182
182 if root is None:
183 if root is None:
183 root = os.path.expanduser('~')
184 root = os.path.expanduser('~')
184 self.fixconfig(root=root)
185 self.fixconfig(root=root)
185
186
186 def fixconfig(self, root=None, section=None):
187 def fixconfig(self, root=None, section=None):
187 if section in (None, 'paths'):
188 if section in (None, 'paths'):
188 # expand vars and ~
189 # expand vars and ~
189 # translate paths relative to root (or home) into absolute paths
190 # translate paths relative to root (or home) into absolute paths
190 root = root or os.getcwd()
191 root = root or os.getcwd()
191 for c in self._tcfg, self._ucfg, self._ocfg:
192 for c in self._tcfg, self._ucfg, self._ocfg:
192 for n, p in c.items('paths'):
193 for n, p in c.items('paths'):
193 if not p:
194 if not p:
194 continue
195 continue
195 if '%%' in p:
196 if '%%' in p:
196 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
197 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
197 % (n, p, self.configsource('paths', n)))
198 % (n, p, self.configsource('paths', n)))
198 p = p.replace('%%', '%')
199 p = p.replace('%%', '%')
199 p = util.expandpath(p)
200 p = util.expandpath(p)
200 if not util.hasscheme(p) and not os.path.isabs(p):
201 if not util.hasscheme(p) and not os.path.isabs(p):
201 p = os.path.normpath(os.path.join(root, p))
202 p = os.path.normpath(os.path.join(root, p))
202 c.set("paths", n, p)
203 c.set("paths", n, p)
203
204
204 if section in (None, 'ui'):
205 if section in (None, 'ui'):
205 # update ui options
206 # update ui options
206 self.debugflag = self.configbool('ui', 'debug')
207 self.debugflag = self.configbool('ui', 'debug')
207 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
208 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
208 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
209 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
209 if self.verbose and self.quiet:
210 if self.verbose and self.quiet:
210 self.quiet = self.verbose = False
211 self.quiet = self.verbose = False
211 self._reportuntrusted = self.debugflag or self.configbool("ui",
212 self._reportuntrusted = self.debugflag or self.configbool("ui",
212 "report_untrusted", True)
213 "report_untrusted", True)
213 self.tracebackflag = self.configbool('ui', 'traceback', False)
214 self.tracebackflag = self.configbool('ui', 'traceback', False)
214
215
215 if section in (None, 'trusted'):
216 if section in (None, 'trusted'):
216 # update trust information
217 # update trust information
217 self._trustusers.update(self.configlist('trusted', 'users'))
218 self._trustusers.update(self.configlist('trusted', 'users'))
218 self._trustgroups.update(self.configlist('trusted', 'groups'))
219 self._trustgroups.update(self.configlist('trusted', 'groups'))
219
220
220 def backupconfig(self, section, item):
221 def backupconfig(self, section, item):
221 return (self._ocfg.backup(section, item),
222 return (self._ocfg.backup(section, item),
222 self._tcfg.backup(section, item),
223 self._tcfg.backup(section, item),
223 self._ucfg.backup(section, item),)
224 self._ucfg.backup(section, item),)
224 def restoreconfig(self, data):
225 def restoreconfig(self, data):
225 self._ocfg.restore(data[0])
226 self._ocfg.restore(data[0])
226 self._tcfg.restore(data[1])
227 self._tcfg.restore(data[1])
227 self._ucfg.restore(data[2])
228 self._ucfg.restore(data[2])
228
229
229 def setconfig(self, section, name, value, source=''):
230 def setconfig(self, section, name, value, source=''):
230 for cfg in (self._ocfg, self._tcfg, self._ucfg):
231 for cfg in (self._ocfg, self._tcfg, self._ucfg):
231 cfg.set(section, name, value, source)
232 cfg.set(section, name, value, source)
232 self.fixconfig(section=section)
233 self.fixconfig(section=section)
233
234
234 def _data(self, untrusted):
235 def _data(self, untrusted):
235 return untrusted and self._ucfg or self._tcfg
236 return untrusted and self._ucfg or self._tcfg
236
237
237 def configsource(self, section, name, untrusted=False):
238 def configsource(self, section, name, untrusted=False):
238 return self._data(untrusted).source(section, name) or 'none'
239 return self._data(untrusted).source(section, name) or 'none'
239
240
240 def config(self, section, name, default=None, untrusted=False):
241 def config(self, section, name, default=None, untrusted=False):
241 if isinstance(name, list):
242 if isinstance(name, list):
242 alternates = name
243 alternates = name
243 else:
244 else:
244 alternates = [name]
245 alternates = [name]
245
246
246 for n in alternates:
247 for n in alternates:
247 value = self._data(untrusted).get(section, n, None)
248 value = self._data(untrusted).get(section, n, None)
248 if value is not None:
249 if value is not None:
249 name = n
250 name = n
250 break
251 break
251 else:
252 else:
252 value = default
253 value = default
253
254
254 if self.debugflag and not untrusted and self._reportuntrusted:
255 if self.debugflag and not untrusted and self._reportuntrusted:
255 for n in alternates:
256 for n in alternates:
256 uvalue = self._ucfg.get(section, n)
257 uvalue = self._ucfg.get(section, n)
257 if uvalue is not None and uvalue != value:
258 if uvalue is not None and uvalue != value:
258 self.debug("ignoring untrusted configuration option "
259 self.debug("ignoring untrusted configuration option "
259 "%s.%s = %s\n" % (section, n, uvalue))
260 "%s.%s = %s\n" % (section, n, uvalue))
260 return value
261 return value
261
262
262 def configpath(self, section, name, default=None, untrusted=False):
263 def configpath(self, section, name, default=None, untrusted=False):
263 'get a path config item, expanded relative to repo root or config file'
264 'get a path config item, expanded relative to repo root or config file'
264 v = self.config(section, name, default, untrusted)
265 v = self.config(section, name, default, untrusted)
265 if v is None:
266 if v is None:
266 return None
267 return None
267 if not os.path.isabs(v) or "://" not in v:
268 if not os.path.isabs(v) or "://" not in v:
268 src = self.configsource(section, name, untrusted)
269 src = self.configsource(section, name, untrusted)
269 if ':' in src:
270 if ':' in src:
270 base = os.path.dirname(src.rsplit(':')[0])
271 base = os.path.dirname(src.rsplit(':')[0])
271 v = os.path.join(base, os.path.expanduser(v))
272 v = os.path.join(base, os.path.expanduser(v))
272 return v
273 return v
273
274
274 def configbool(self, section, name, default=False, untrusted=False):
275 def configbool(self, section, name, default=False, untrusted=False):
275 """parse a configuration element as a boolean
276 """parse a configuration element as a boolean
276
277
277 >>> u = ui(); s = 'foo'
278 >>> u = ui(); s = 'foo'
278 >>> u.setconfig(s, 'true', 'yes')
279 >>> u.setconfig(s, 'true', 'yes')
279 >>> u.configbool(s, 'true')
280 >>> u.configbool(s, 'true')
280 True
281 True
281 >>> u.setconfig(s, 'false', 'no')
282 >>> u.setconfig(s, 'false', 'no')
282 >>> u.configbool(s, 'false')
283 >>> u.configbool(s, 'false')
283 False
284 False
284 >>> u.configbool(s, 'unknown')
285 >>> u.configbool(s, 'unknown')
285 False
286 False
286 >>> u.configbool(s, 'unknown', True)
287 >>> u.configbool(s, 'unknown', True)
287 True
288 True
288 >>> u.setconfig(s, 'invalid', 'somevalue')
289 >>> u.setconfig(s, 'invalid', 'somevalue')
289 >>> u.configbool(s, 'invalid')
290 >>> u.configbool(s, 'invalid')
290 Traceback (most recent call last):
291 Traceback (most recent call last):
291 ...
292 ...
292 ConfigError: foo.invalid is not a boolean ('somevalue')
293 ConfigError: foo.invalid is not a boolean ('somevalue')
293 """
294 """
294
295
295 v = self.config(section, name, None, untrusted)
296 v = self.config(section, name, None, untrusted)
296 if v is None:
297 if v is None:
297 return default
298 return default
298 if isinstance(v, bool):
299 if isinstance(v, bool):
299 return v
300 return v
300 b = util.parsebool(v)
301 b = util.parsebool(v)
301 if b is None:
302 if b is None:
302 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
303 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
303 % (section, name, v))
304 % (section, name, v))
304 return b
305 return b
305
306
306 def configint(self, section, name, default=None, untrusted=False):
307 def configint(self, section, name, default=None, untrusted=False):
307 """parse a configuration element as an integer
308 """parse a configuration element as an integer
308
309
309 >>> u = ui(); s = 'foo'
310 >>> u = ui(); s = 'foo'
310 >>> u.setconfig(s, 'int1', '42')
311 >>> u.setconfig(s, 'int1', '42')
311 >>> u.configint(s, 'int1')
312 >>> u.configint(s, 'int1')
312 42
313 42
313 >>> u.setconfig(s, 'int2', '-42')
314 >>> u.setconfig(s, 'int2', '-42')
314 >>> u.configint(s, 'int2')
315 >>> u.configint(s, 'int2')
315 -42
316 -42
316 >>> u.configint(s, 'unknown', 7)
317 >>> u.configint(s, 'unknown', 7)
317 7
318 7
318 >>> u.setconfig(s, 'invalid', 'somevalue')
319 >>> u.setconfig(s, 'invalid', 'somevalue')
319 >>> u.configint(s, 'invalid')
320 >>> u.configint(s, 'invalid')
320 Traceback (most recent call last):
321 Traceback (most recent call last):
321 ...
322 ...
322 ConfigError: foo.invalid is not an integer ('somevalue')
323 ConfigError: foo.invalid is not an integer ('somevalue')
323 """
324 """
324
325
325 v = self.config(section, name, None, untrusted)
326 v = self.config(section, name, None, untrusted)
326 if v is None:
327 if v is None:
327 return default
328 return default
328 try:
329 try:
329 return int(v)
330 return int(v)
330 except ValueError:
331 except ValueError:
331 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
332 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
332 % (section, name, v))
333 % (section, name, v))
333
334
334 def configbytes(self, section, name, default=0, untrusted=False):
335 def configbytes(self, section, name, default=0, untrusted=False):
335 """parse a configuration element as a quantity in bytes
336 """parse a configuration element as a quantity in bytes
336
337
337 Units can be specified as b (bytes), k or kb (kilobytes), m or
338 Units can be specified as b (bytes), k or kb (kilobytes), m or
338 mb (megabytes), g or gb (gigabytes).
339 mb (megabytes), g or gb (gigabytes).
339
340
340 >>> u = ui(); s = 'foo'
341 >>> u = ui(); s = 'foo'
341 >>> u.setconfig(s, 'val1', '42')
342 >>> u.setconfig(s, 'val1', '42')
342 >>> u.configbytes(s, 'val1')
343 >>> u.configbytes(s, 'val1')
343 42
344 42
344 >>> u.setconfig(s, 'val2', '42.5 kb')
345 >>> u.setconfig(s, 'val2', '42.5 kb')
345 >>> u.configbytes(s, 'val2')
346 >>> u.configbytes(s, 'val2')
346 43520
347 43520
347 >>> u.configbytes(s, 'unknown', '7 MB')
348 >>> u.configbytes(s, 'unknown', '7 MB')
348 7340032
349 7340032
349 >>> u.setconfig(s, 'invalid', 'somevalue')
350 >>> u.setconfig(s, 'invalid', 'somevalue')
350 >>> u.configbytes(s, 'invalid')
351 >>> u.configbytes(s, 'invalid')
351 Traceback (most recent call last):
352 Traceback (most recent call last):
352 ...
353 ...
353 ConfigError: foo.invalid is not a byte quantity ('somevalue')
354 ConfigError: foo.invalid is not a byte quantity ('somevalue')
354 """
355 """
355
356
356 value = self.config(section, name)
357 value = self.config(section, name)
357 if value is None:
358 if value is None:
358 if not isinstance(default, str):
359 if not isinstance(default, str):
359 return default
360 return default
360 value = default
361 value = default
361 try:
362 try:
362 return util.sizetoint(value)
363 return util.sizetoint(value)
363 except error.ParseError:
364 except error.ParseError:
364 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
365 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
365 % (section, name, value))
366 % (section, name, value))
366
367
367 def configlist(self, section, name, default=None, untrusted=False):
368 def configlist(self, section, name, default=None, untrusted=False):
368 """parse a configuration element as a list of comma/space separated
369 """parse a configuration element as a list of comma/space separated
369 strings
370 strings
370
371
371 >>> u = ui(); s = 'foo'
372 >>> u = ui(); s = 'foo'
372 >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
373 >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
373 >>> u.configlist(s, 'list1')
374 >>> u.configlist(s, 'list1')
374 ['this', 'is', 'a small', 'test']
375 ['this', 'is', 'a small', 'test']
375 """
376 """
376
377
377 def _parse_plain(parts, s, offset):
378 def _parse_plain(parts, s, offset):
378 whitespace = False
379 whitespace = False
379 while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
380 while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
380 whitespace = True
381 whitespace = True
381 offset += 1
382 offset += 1
382 if offset >= len(s):
383 if offset >= len(s):
383 return None, parts, offset
384 return None, parts, offset
384 if whitespace:
385 if whitespace:
385 parts.append('')
386 parts.append('')
386 if s[offset] == '"' and not parts[-1]:
387 if s[offset] == '"' and not parts[-1]:
387 return _parse_quote, parts, offset + 1
388 return _parse_quote, parts, offset + 1
388 elif s[offset] == '"' and parts[-1][-1] == '\\':
389 elif s[offset] == '"' and parts[-1][-1] == '\\':
389 parts[-1] = parts[-1][:-1] + s[offset]
390 parts[-1] = parts[-1][:-1] + s[offset]
390 return _parse_plain, parts, offset + 1
391 return _parse_plain, parts, offset + 1
391 parts[-1] += s[offset]
392 parts[-1] += s[offset]
392 return _parse_plain, parts, offset + 1
393 return _parse_plain, parts, offset + 1
393
394
394 def _parse_quote(parts, s, offset):
395 def _parse_quote(parts, s, offset):
395 if offset < len(s) and s[offset] == '"': # ""
396 if offset < len(s) and s[offset] == '"': # ""
396 parts.append('')
397 parts.append('')
397 offset += 1
398 offset += 1
398 while offset < len(s) and (s[offset].isspace() or
399 while offset < len(s) and (s[offset].isspace() or
399 s[offset] == ','):
400 s[offset] == ','):
400 offset += 1
401 offset += 1
401 return _parse_plain, parts, offset
402 return _parse_plain, parts, offset
402
403
403 while offset < len(s) and s[offset] != '"':
404 while offset < len(s) and s[offset] != '"':
404 if (s[offset] == '\\' and offset + 1 < len(s)
405 if (s[offset] == '\\' and offset + 1 < len(s)
405 and s[offset + 1] == '"'):
406 and s[offset + 1] == '"'):
406 offset += 1
407 offset += 1
407 parts[-1] += '"'
408 parts[-1] += '"'
408 else:
409 else:
409 parts[-1] += s[offset]
410 parts[-1] += s[offset]
410 offset += 1
411 offset += 1
411
412
412 if offset >= len(s):
413 if offset >= len(s):
413 real_parts = _configlist(parts[-1])
414 real_parts = _configlist(parts[-1])
414 if not real_parts:
415 if not real_parts:
415 parts[-1] = '"'
416 parts[-1] = '"'
416 else:
417 else:
417 real_parts[0] = '"' + real_parts[0]
418 real_parts[0] = '"' + real_parts[0]
418 parts = parts[:-1]
419 parts = parts[:-1]
419 parts.extend(real_parts)
420 parts.extend(real_parts)
420 return None, parts, offset
421 return None, parts, offset
421
422
422 offset += 1
423 offset += 1
423 while offset < len(s) and s[offset] in [' ', ',']:
424 while offset < len(s) and s[offset] in [' ', ',']:
424 offset += 1
425 offset += 1
425
426
426 if offset < len(s):
427 if offset < len(s):
427 if offset + 1 == len(s) and s[offset] == '"':
428 if offset + 1 == len(s) and s[offset] == '"':
428 parts[-1] += '"'
429 parts[-1] += '"'
429 offset += 1
430 offset += 1
430 else:
431 else:
431 parts.append('')
432 parts.append('')
432 else:
433 else:
433 return None, parts, offset
434 return None, parts, offset
434
435
435 return _parse_plain, parts, offset
436 return _parse_plain, parts, offset
436
437
def _configlist(s):
    """Parse a config-list string into its component parts.

    Trailing separators are stripped; an empty specification yields
    an empty list.  Parsing is driven by the small parser-state
    machine (`_parse_plain` / `_parse_quote`) defined alongside.
    """
    stripped = s.rstrip(' ,')
    if not stripped:
        return []
    parser, parts, offset = _parse_plain, [''], 0
    while parser:
        parser, parts, offset = parser(parts, stripped, offset)
    return parts
445
446
446 result = self.config(section, name, untrusted=untrusted)
447 result = self.config(section, name, untrusted=untrusted)
447 if result is None:
448 if result is None:
448 result = default or []
449 result = default or []
449 if isinstance(result, basestring):
450 if isinstance(result, basestring):
450 result = _configlist(result.lstrip(' ,\n'))
451 result = _configlist(result.lstrip(' ,\n'))
451 if result is None:
452 if result is None:
452 result = default or []
453 result = default or []
453 return result
454 return result
454
455
def has_section(self, section, untrusted=False):
    """Return True if *section* exists in the configuration."""
    return section in self._data(untrusted)
458
459
def configitems(self, section, untrusted=False):
    """Return the (name, value) pairs defined in *section*.

    When debugging is enabled and trusted data is requested, also
    report untrusted configuration options that were ignored because
    they differ from the trusted value.
    """
    entries = self._data(untrusted).items(section)
    if self.debugflag and not untrusted and self._reportuntrusted:
        for key, value in self._ucfg.items(section):
            if self._tcfg.get(section, key) != value:
                self.debug("ignoring untrusted configuration option "
                           "%s.%s = %s\n" % (section, key, value))
    return entries
467
468
def walkconfig(self, untrusted=False):
    """Iterate over every (section, name, value) triple in the config."""
    data = self._data(untrusted)
    for section in data.sections():
        for name, value in self.configitems(section, untrusted):
            yield section, name, value
473
474
def plain(self, feature=None):
    """Report whether plain mode is active.

    In plain mode every configuration variable that influences
    Mercurial's behavior or output must be ignored, and the output
    must stay stable, reproducible and fit for consumption by
    scripts or other applications.

    Plain mode can only be enabled through the `HGPLAIN' and
    `HGPLAINEXCEPT' environment variables.

    Returns False when HGPLAIN is not set or when *feature* appears
    in HGPLAINEXCEPT, and True otherwise.
    """
    environ = os.environ
    if 'HGPLAIN' not in environ and 'HGPLAINEXCEPT' not in environ:
        return False
    exceptions = environ.get('HGPLAINEXCEPT', '').strip().split(',')
    if feature and exceptions:
        return feature not in exceptions
    return True
495
496
def username(self):
    """Return the default username to be used in commits.

    Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
    searching stops at the first one that is set.  If none is found
    and ui.askusername is True the user is prompted; otherwise, for
    non-interactive sessions, ($LOGNAME or $USER or $LNAME or
    $USERNAME) + "@full.hostname" is synthesized.

    Raises util.Abort when no username can be determined or when the
    username contains a newline.
    """
    user = os.environ.get("HGUSER")
    if user is None:
        configured = self.config("ui", ["username", "user"])
        if configured is not None:
            user = os.path.expandvars(configured)
    if user is None:
        user = os.environ.get("EMAIL")
    if user is None and self.configbool("ui", "askusername"):
        user = self.prompt(_("enter a commit username:"), default=None)
    if user is None and not self.interactive():
        try:
            user = '%s@%s' % (util.getuser(), socket.getfqdn())
            self.warn(_("no username found, using '%s' instead\n") % user)
        except KeyError:
            # getuser() may fail when no login name is available
            pass
    if not user:
        raise util.Abort(_('no username supplied'),
                         hint=_('use "hg config --edit" '
                                'to set your username'))
    if "\n" in user:
        raise util.Abort(_("username %s contains a newline\n") % repr(user))
    return user
526
527
def shortuser(self, user):
    """Return a short representation of a user name or email address.

    The full name is kept when the ui is in verbose mode.
    """
    if self.verbose:
        return user
    return util.shortuser(user)
532
533
def expandpath(self, loc, default=None):
    """Return a repository location relative to cwd or from [paths].

    A location that already has a URL scheme, or that is a local
    directory containing a '.hg', is returned unchanged; otherwise
    the [paths] configuration is consulted.
    """
    if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')):
        return loc
    entry = self.paths.getpath(loc, default=default)
    return entry.loc if entry else loc
542
543
@util.propertycache
def paths(self):
    """Lazily-built object describing the [paths] config section."""
    return paths(self)
546
547
def pushbuffer(self, error=False, subproc=False):
    """Install a buffer capturing this ui's standard output.

    If *error* is true the error output is captured too; if *subproc*
    is true, output from subprocesses (typically hooks) is captured
    as well.
    """
    self._buffers.append([])
    self._bufferstates.append((error, subproc))
556
557
def popbuffer(self, labeled=False):
    """Remove the most recent buffer and return its captured output.

    *labeled* tells overriding implementations whether labels attached
    to the buffered output should be honoured.  Stock Mercurial
    ignores it, but extensions and GUI tools may return styled output;
    leave it False when the output will be captured and parsed or
    processed.
    """
    self._bufferstates.pop()
    return "".join(self._buffers.pop())
569
570
def write(self, *args, **opts):
    """Write *args* to the active buffer, or to standard output.

    Extensions and GUI tools may override this method together with
    write_err(), popbuffer() and label() to style output from the
    various parts of hg.

    The optional keyword argument "label" is a string of
    space-separated label names of the form "topic.type" (e.g.
    ui.debug() uses "ui.debug").  When labeling output of a specific
    command, a "cmdname.type" label such as "status.modified" is
    recommended.
    """
    self._progclear()
    if self._buffers:
        self._buffers[-1].extend(str(a) for a in args)
    else:
        for a in args:
            self.fout.write(str(a))
593
594
594 def write_err(self, *args, **opts):
595 def write_err(self, *args, **opts):
595 self._progclear()
596 self._progclear()
596 try:
597 try:
597 if self._bufferstates and self._bufferstates[-1][0]:
598 if self._bufferstates and self._bufferstates[-1][0]:
598 return self.write(*args, **opts)
599 return self.write(*args, **opts)
599 if not getattr(self.fout, 'closed', False):
600 if not getattr(self.fout, 'closed', False):
600 self.fout.flush()
601 self.fout.flush()
601 for a in args:
602 for a in args:
602 self.ferr.write(str(a))
603 self.ferr.write(str(a))
603 # stderr may be buffered under win32 when redirected to files,
604 # stderr may be buffered under win32 when redirected to files,
604 # including stdout.
605 # including stdout.
605 if not getattr(self.ferr, 'closed', False):
606 if not getattr(self.ferr, 'closed', False):
606 self.ferr.flush()
607 self.ferr.flush()
607 except IOError, inst:
608 except IOError, inst:
608 if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
609 if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
609 raise
610 raise
610
611
def flush(self):
    """Flush both output streams, ignoring I/O errors."""
    try:
        self.fout.flush()
    except (IOError, ValueError):
        pass
    try:
        self.ferr.flush()
    except (IOError, ValueError):
        pass
616
617
617 def _isatty(self, fh):
618 def _isatty(self, fh):
618 if self.configbool('ui', 'nontty', False):
619 if self.configbool('ui', 'nontty', False):
619 return False
620 return False
620 return util.isatty(fh)
621 return util.isatty(fh)
621
622
def interactive(self):
    """Report whether interactive input is allowed.

    An interactive session is one where input can reasonably be read
    from `sys.stdin'.  When this returns false, any attempt to read
    from stdin should fail with an error, unless a sensible default
    has been specified.

    The result is driven by the `ui.interactive' configuration
    variable, falling back to whether `sys.stdin' points to a
    terminal device when the variable is unset.

    This covers input only; see `ui.formatted()' for output.
    """
    configured = self.configbool("ui", "interactive", None)
    if configured is None:
        # some environments replace stdin without implementing isatty
        # usually those are non-interactive
        return self._isatty(self.fin)
    return configured
643
644
def termwidth(self):
    """Return the width of the terminal, in columns.

    An explicit $COLUMNS value takes precedence over the detected
    terminal size.
    """
    cols = os.environ.get('COLUMNS')
    if cols is not None:
        try:
            return int(cols)
        except ValueError:
            pass
    return util.termwidth()
653
654
def formatted(self):
    """Report whether formatted output should be used.

    It is often desirable to format output to suit the output medium,
    e.g. by truncating long lines or colorizing messages.  However,
    this is often not desirable when piping output into other
    utilities, e.g. `grep'.

    The result is driven by the `ui.formatted' configuration variable,
    falling back to whether `sys.stdout' points to a terminal device
    when the variable is unset.  Please note that `ui.formatted'
    should be considered an implementation detail; it is not intended
    for use outside Mercurial or its extensions.

    This covers output only; see `ui.interactive()' for input.  Plain
    mode (see `ui.plain()') always disables formatting.
    """
    if self.plain():
        return False
    configured = self.configbool("ui", "formatted", None)
    if configured is None:
        # some environments replace stdout without implementing isatty
        # usually those are non-interactive
        return self._isatty(self.fout)
    return configured
681
682
682 def _readline(self, prompt=''):
683 def _readline(self, prompt=''):
683 if self._isatty(self.fin):
684 if self._isatty(self.fin):
684 try:
685 try:
685 # magically add command line editing support, where
686 # magically add command line editing support, where
686 # available
687 # available
687 import readline
688 import readline
688 # force demandimport to really load the module
689 # force demandimport to really load the module
689 readline.read_history_file
690 readline.read_history_file
690 # windows sometimes raises something other than ImportError
691 # windows sometimes raises something other than ImportError
691 except Exception:
692 except Exception:
692 pass
693 pass
693
694
694 # call write() so output goes through subclassed implementation
695 # call write() so output goes through subclassed implementation
695 # e.g. color extension on Windows
696 # e.g. color extension on Windows
696 self.write(prompt)
697 self.write(prompt)
697
698
698 # instead of trying to emulate raw_input, swap (self.fin,
699 # instead of trying to emulate raw_input, swap (self.fin,
699 # self.fout) with (sys.stdin, sys.stdout)
700 # self.fout) with (sys.stdin, sys.stdout)
700 oldin = sys.stdin
701 oldin = sys.stdin
701 oldout = sys.stdout
702 oldout = sys.stdout
702 sys.stdin = self.fin
703 sys.stdin = self.fin
703 sys.stdout = self.fout
704 sys.stdout = self.fout
704 # prompt ' ' must exist; otherwise readline may delete entire line
705 # prompt ' ' must exist; otherwise readline may delete entire line
705 # - http://bugs.python.org/issue12833
706 # - http://bugs.python.org/issue12833
706 line = raw_input(' ')
707 line = raw_input(' ')
707 sys.stdin = oldin
708 sys.stdin = oldin
708 sys.stdout = oldout
709 sys.stdout = oldout
709
710
710 # When stdin is in binary mode on Windows, it can cause
711 # When stdin is in binary mode on Windows, it can cause
711 # raw_input() to emit an extra trailing carriage return
712 # raw_input() to emit an extra trailing carriage return
712 if os.linesep == '\r\n' and line and line[-1] == '\r':
713 if os.linesep == '\r\n' and line and line[-1] == '\r':
713 line = line[:-1]
714 line = line[:-1]
714 return line
715 return line
715
716
def prompt(self, msg, default="y"):
    """Prompt the user with *msg* and read the response.

    The default is returned without prompting when the ui is not
    interactive.  Raises util.Abort on end-of-file.
    """
    if not self.interactive():
        self.write(msg, ' ', default, "\n")
        return default
    try:
        response = self._readline(self.label(msg, 'ui.prompt'))
        if not response:
            response = default
        if self.configbool('ui', 'promptecho'):
            self.write(response, "\n")
        return response
    except EOFError:
        raise util.Abort(_('response expected'))
732
733
733 @staticmethod
734 @staticmethod
734 def extractchoices(prompt):
735 def extractchoices(prompt):
735 """Extract prompt message and list of choices from specified prompt.
736 """Extract prompt message and list of choices from specified prompt.
736
737
737 This returns tuple "(message, choices)", and "choices" is the
738 This returns tuple "(message, choices)", and "choices" is the
738 list of tuple "(response character, text without &)".
739 list of tuple "(response character, text without &)".
739 """
740 """
740 parts = prompt.split('$$')
741 parts = prompt.split('$$')
741 msg = parts[0].rstrip(' ')
742 msg = parts[0].rstrip(' ')
742 choices = [p.strip(' ') for p in parts[1:]]
743 choices = [p.strip(' ') for p in parts[1:]]
743 return (msg,
744 return (msg,
744 [(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
745 [(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
745 for s in choices])
746 for s in choices])
746
747
def promptchoice(self, prompt, default=0):
    """Prompt the user and return the index of the chosen answer.

    The prompt is formatted as follows:

      "would you like fries with that (Yn)? $$ &Yes $$ &No"

    Responses are matched case-insensitively; the loop repeats until
    a valid response is given.  When the ui is not interactive the
    default choice is used.
    """
    msg, choices = self.extractchoices(prompt)
    resps = [r for r, t in choices]
    while True:
        answer = self.prompt(msg, resps[default]).lower()
        if answer in resps:
            return resps.index(answer)
        self.write(_("unrecognized response\n"))
765
766
def getpass(self, prompt=None, default=None):
    """Read a password without echoing it.

    Returns *default* when the ui is not interactive; raises
    util.Abort on end-of-file.
    """
    if not self.interactive():
        return default
    try:
        self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
        # disable getpass() only if explicitly specified. it's still valid
        # to interact with tty even if fin is not a tty.
        if self.configbool('ui', 'nontty'):
            return self.fin.readline().rstrip('\n')
        return getpass.getpass('')
    except EOFError:
        raise util.Abort(_('response expected'))
def status(self, *msg, **opts):
    """Write a status message to output (if ui.quiet is False).

    Adds an output label of "ui.status".
    """
    if self.quiet:
        return
    opts['label'] = opts.get('label', '') + ' ui.status'
    self.write(*msg, **opts)
def warn(self, *msg, **opts):
    """Write a warning message to the error output (stderr).

    Adds an output label of "ui.warning".
    """
    opts['label'] = opts.get('label', '') + ' ui.warning'
    self.write_err(*msg, **opts)
def note(self, *msg, **opts):
    """Write a note to output (only if ui.verbose is True).

    Adds an output label of "ui.note".
    """
    if not self.verbose:
        return
    opts['label'] = opts.get('label', '') + ' ui.note'
    self.write(*msg, **opts)
def debug(self, *msg, **opts):
    """Write a debug message to output (only if ui.debugflag is True).

    Adds an output label of "ui.debug".
    """
    if not self.debugflag:
        return
    opts['label'] = opts.get('label', '') + ' ui.debug'
    self.write(*msg, **opts)
def edit(self, text, user, extra=None, editform=None):
    """Launch the user's editor on *text* and return the edited text.

    *user* is exported to the editor process as HGUSER; revision
    identifiers found in *extra* (transplant/graft/rebase sources)
    are exported as HGREVISION, and *editform*, if given, as
    HGEDITFORM.  The text is round-tripped through a temporary file
    which is always removed afterwards.

    Raises util.Abort (via self.system) when the editor fails.
    """
    # use a None sentinel instead of the mutable-default-argument
    # pitfall extra={}: a shared default dict mutated by one caller
    # would leak into every later call
    if extra is None:
        extra = {}
    (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                  text=True)
    try:
        f = os.fdopen(fd, "w")
        f.write(text)
        f.close()

        environ = {'HGUSER': user}
        if 'transplant_source' in extra:
            environ.update({'HGREVISION': hex(extra['transplant_source'])})
        for label in ('intermediate-source', 'source', 'rebase_source'):
            if label in extra:
                environ.update({'HGREVISION': extra[label]})
                break
        if editform:
            environ.update({'HGEDITFORM': editform})

        editor = self.geteditor()

        self.system("%s \"%s\"" % (editor, name),
                    environ=environ,
                    onerr=util.Abort, errprefix=_("edit failed"))

        f = open(name)
        t = f.read()
        f.close()
    finally:
        os.unlink(name)

    return t
841
842
def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
    """Execute a shell command with the appropriate output stream.

    Command output is redirected through this ui when any active
    buffer was pushed with subprocess capture enabled (so hook output
    lands in the buffer rather than on stdout).
    """
    # use a None sentinel instead of the mutable default environ={};
    # a shared default dict would be visible across calls if mutated
    if environ is None:
        environ = {}
    out = self.fout
    if any(s[1] for s in self._bufferstates):
        out = self
    return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
                       errprefix=errprefix, out=out)
851
852
def traceback(self, exc=None, force=False):
    """Print an exception traceback if enabled (or *force* is set).

    Must only be called from within an exception handler.  Returns
    True when a traceback was printed.
    """
    if self.tracebackflag or force:
        if exc is None:
            exc = sys.exc_info()
        cause = getattr(exc[1], 'cause', None)

        if cause is not None:
            causetb = traceback.format_tb(cause[2])
            exctb = traceback.format_tb(exc[2])
            exconly = traceback.format_exception_only(cause[0], cause[1])

            # exclude frame where 'exc' was chained and rethrown from exctb
            self.write_err('Traceback (most recent call last):\n',
                           ''.join(exctb[:-1]),
                           ''.join(causetb),
                           ''.join(exconly))
        else:
            formatted = traceback.format_exception(exc[0], exc[1], exc[2])
            self.write_err(''.join(formatted))
    return self.tracebackflag or force
875
876
def geteditor(self):
    """Return the editor command to use."""
    if sys.platform == 'plan9':
        # vi is the MIPS instruction simulator on Plan 9. We
        # instead default to E to plumb commit messages to
        # avoid confusion.
        fallback = 'E'
    else:
        fallback = 'vi'
    return (os.environ.get("HGEDITOR") or
            self.config("ui", "editor") or
            os.environ.get("VISUAL") or
            os.environ.get("EDITOR", fallback))
889
890
@util.propertycache
def _progbar(self):
    """The progress-bar singleton for this ui, or None when disabled.

    Progress display is suppressed in quiet or debug mode, when the
    progress.disable knob is set, or when the progress module decides
    printing is inappropriate.
    """
    disabled = (self.quiet or self.debugflag
                or self.configbool('progress', 'disable', False)
                or not progress.shouldprint(self))
    if disabled:
        return None
    return getprogbar(self)
898
899
899 def _progclear(self):
900 def _progclear(self):
900 """clear progress bar output if any. use it before any output"""
901 """clear progress bar output if any. use it before any output"""
901 if '_progbar' not in vars(self): # nothing loadef yet
902 if '_progbar' not in vars(self): # nothing loadef yet
902 return
903 return
903 if self._progbar is not None and self._progbar.printed:
904 if self._progbar is not None and self._progbar.printed:
904 self._progbar.clear()
905 self._progbar.clear()
905
906
def progress(self, topic, pos, item="", unit="", total=None):
    """Show a progress message.

    With stock hg this is merely a debug message hidden by default,
    but extensions or GUI tools may make it visible.  'topic' names
    the current operation, 'item' is a non-numeric marker of the
    current position (i.e. the currently in-process file), 'pos' is
    the current numeric position (i.e. revision, bytes, etc.), 'unit'
    is a corresponding unit label, and 'total' is the highest
    expected pos.

    Multiple nested topics may be active at a time.  All topics
    should be marked closed by setting pos to None at termination.
    """
    if self._progbar is not None:
        self._progbar.progress(topic, pos, item=item, unit=unit,
                               total=total)
    if pos is None or not self.configbool('progress', 'debug'):
        return

    if unit:
        unit = ' ' + unit
    if item:
        item = ' ' + item

    if total:
        pct = 100.0 * pos / total
        self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
                   % (topic, item, pos, total, unit, pct))
    else:
        self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
939
940
940 def log(self, service, *msg, **opts):
941 def log(self, service, *msg, **opts):
941 '''hook for logging facility extensions
942 '''hook for logging facility extensions
942
943
943 service should be a readily-identifiable subsystem, which will
944 service should be a readily-identifiable subsystem, which will
944 allow filtering.
945 allow filtering.
945 message should be a newline-terminated string to log.
946 message should be a newline-terminated string to log.
946 '''
947 '''
947 pass
948 pass
948
949
949 def label(self, msg, label):
950 def label(self, msg, label):
950 '''style msg based on supplied label
951 '''style msg based on supplied label
951
952
952 Like ui.write(), this just returns msg unchanged, but extensions
953 Like ui.write(), this just returns msg unchanged, but extensions
953 and GUI tools can override it to allow styling output without
954 and GUI tools can override it to allow styling output without
954 writing it.
955 writing it.
955
956
956 ui.write(s, 'label') is equivalent to
957 ui.write(s, 'label') is equivalent to
957 ui.write(ui.label(s, 'label')).
958 ui.write(ui.label(s, 'label')).
958 '''
959 '''
959 return msg
960 return msg
960
961
962 def develwarn(self, msg):
963 """issue a developer warning message"""
964 msg = 'devel-warn: ' + msg
965 if self.tracebackflag:
966 util.debugstacktrace(msg, 2)
967 else:
968 curframe = inspect.currentframe()
969 calframe = inspect.getouterframes(curframe, 2)
970 self.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[2][1:4]))
971
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """
    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems('paths'):
            # an empty location is treated the same as a missing entry
            if loc:
                self[name] = path(name, rawloc=loc)

    def getpath(self, name, default=None):
        """Return a ``path`` for the specified name, falling back to a default.

        Returns the first of ``name`` or ``default`` that is present, or None
        if neither is present.
        """
        if name in self:
            return self[name]
        if default is not None and default in self:
            return self[default]
        return None
992
1003
class path(object):
    """Represents an individual path and its configuration."""

    def __init__(self, name, rawloc=None):
        """Construct a path from its config options.

        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        """
        self.name = name
        # stored verbatim for now; smarter handling of rawloc may come later
        self.loc = rawloc
1005
1016
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None

def getprogbar(ui):
    """return the shared progress bar, creating it on first use"""
    global _progresssingleton
    if _progresssingleton is not None:
        return _progresssingleton
    # passing 'ui' object to the singleton is fishy,
    # this is how the extension used to work but feel free to rework it.
    _progresssingleton = progress.progbar(ui)
    return _progresssingleton
General Comments 0
You need to be logged in to leave comments. Login now