develwarn: handle the end of line inside the function itself...
Pierre-Yves David
r24748:d6caadff default
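
In the diff below (the old line is marked '-', the new line '+'), the visible change is at line 929 of localrepo.py: the call site stops appending '\n' to its message, because develwarn now handles the end of line inside the function itself. A minimal sketch of the idea, assuming the helper lives in scmutil as at this revision; the body here is illustrative, not the actual implementation:

    # Illustrative sketch only: develwarn is assumed to terminate the message
    # itself, so call sites such as localrepo.transaction() pass bare strings.
    def develwarn(ui, msg):
        """issue a developer warning message (sketch)"""
        if not msg.endswith('\n'):
            msg += '\n'  # end-of-line appended here instead of by each caller
        ui.write_err('devel-warn: ' + msg)

Centralizing the newline means any future decoration of the end of the line can happen in one place rather than at every call site.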
@@ -1,1926 +1,1926 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 cg = exchange.readbundle(self.ui, cg, None)
128 cg = exchange.readbundle(self.ui, cg, None)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 if util.safehasattr(ret, 'getchunks'):
130 if util.safehasattr(ret, 'getchunks'):
131 # This is a bundle20 object, turn it into an unbundler.
131 # This is a bundle20 object, turn it into an unbundler.
132 # This little dance should be dropped eventually when the API
132 # This little dance should be dropped eventually when the API
133 # is finally improved.
133 # is finally improved.
134 stream = util.chunkbuffer(ret.getchunks())
134 stream = util.chunkbuffer(ret.getchunks())
135 ret = bundle2.getunbundler(self.ui, stream)
135 ret = bundle2.getunbundler(self.ui, stream)
136 return ret
136 return ret
137 except error.PushRaced, exc:
137 except error.PushRaced, exc:
138 raise error.ResponseError(_('push failed:'), str(exc))
138 raise error.ResponseError(_('push failed:'), str(exc))
139
139
140 def lock(self):
140 def lock(self):
141 return self._repo.lock()
141 return self._repo.lock()
142
142
143 def addchangegroup(self, cg, source, url):
143 def addchangegroup(self, cg, source, url):
144 return changegroup.addchangegroup(self._repo, cg, source, url)
144 return changegroup.addchangegroup(self._repo, cg, source, url)
145
145
146 def pushkey(self, namespace, key, old, new):
146 def pushkey(self, namespace, key, old, new):
147 return self._repo.pushkey(namespace, key, old, new)
147 return self._repo.pushkey(namespace, key, old, new)
148
148
149 def listkeys(self, namespace):
149 def listkeys(self, namespace):
150 return self._repo.listkeys(namespace)
150 return self._repo.listkeys(namespace)
151
151
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 '''used to test argument passing over the wire'''
153 '''used to test argument passing over the wire'''
154 return "%s %s %s %s %s" % (one, two, three, four, five)
154 return "%s %s %s %s %s" % (one, two, three, four, five)
155
155
156 class locallegacypeer(localpeer):
156 class locallegacypeer(localpeer):
157 '''peer extension which implements legacy methods too; used for tests with
157 '''peer extension which implements legacy methods too; used for tests with
158 restricted capabilities'''
158 restricted capabilities'''
159
159
160 def __init__(self, repo):
160 def __init__(self, repo):
161 localpeer.__init__(self, repo, caps=legacycaps)
161 localpeer.__init__(self, repo, caps=legacycaps)
162
162
163 def branches(self, nodes):
163 def branches(self, nodes):
164 return self._repo.branches(nodes)
164 return self._repo.branches(nodes)
165
165
166 def between(self, pairs):
166 def between(self, pairs):
167 return self._repo.between(pairs)
167 return self._repo.between(pairs)
168
168
169 def changegroup(self, basenodes, source):
169 def changegroup(self, basenodes, source):
170 return changegroup.changegroup(self._repo, basenodes, source)
170 return changegroup.changegroup(self._repo, basenodes, source)
171
171
172 def changegroupsubset(self, bases, heads, source):
172 def changegroupsubset(self, bases, heads, source):
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174
174
175 class localrepository(object):
175 class localrepository(object):
176
176
177 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
177 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 'dotencode'))
179 'dotencode'))
180 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
180 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
181 requirements = ['revlogv1']
181 requirements = ['revlogv1']
182 filtername = None
182 filtername = None
183
183
184 # a list of (ui, featureset) functions.
184 # a list of (ui, featureset) functions.
185 # only functions defined in module of enabled extensions are invoked
185 # only functions defined in module of enabled extensions are invoked
186 featuresetupfuncs = set()
186 featuresetupfuncs = set()
187
187
188 def _baserequirements(self, create):
188 def _baserequirements(self, create):
189 return self.requirements[:]
189 return self.requirements[:]
190
190
191 def __init__(self, baseui, path=None, create=False):
191 def __init__(self, baseui, path=None, create=False):
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wopener = self.wvfs
193 self.wopener = self.wvfs
194 self.root = self.wvfs.base
194 self.root = self.wvfs.base
195 self.path = self.wvfs.join(".hg")
195 self.path = self.wvfs.join(".hg")
196 self.origroot = path
196 self.origroot = path
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.vfs = scmutil.vfs(self.path)
198 self.vfs = scmutil.vfs(self.path)
199 self.opener = self.vfs
199 self.opener = self.vfs
200 self.baseui = baseui
200 self.baseui = baseui
201 self.ui = baseui.copy()
201 self.ui = baseui.copy()
202 self.ui.copy = baseui.copy # prevent copying repo configuration
202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 # A list of callback to shape the phase if no data were found.
203 # A list of callback to shape the phase if no data were found.
204 # Callback are in the form: func(repo, roots) --> processed root.
204 # Callback are in the form: func(repo, roots) --> processed root.
205 # This list it to be filled by extension during repo setup
205 # This list it to be filled by extension during repo setup
206 self._phasedefaults = []
206 self._phasedefaults = []
207 try:
207 try:
208 self.ui.readconfig(self.join("hgrc"), self.root)
208 self.ui.readconfig(self.join("hgrc"), self.root)
209 extensions.loadall(self.ui)
209 extensions.loadall(self.ui)
210 except IOError:
210 except IOError:
211 pass
211 pass
212
212
213 if self.featuresetupfuncs:
213 if self.featuresetupfuncs:
214 self.supported = set(self._basesupported) # use private copy
214 self.supported = set(self._basesupported) # use private copy
215 extmods = set(m.__name__ for n, m
215 extmods = set(m.__name__ for n, m
216 in extensions.extensions(self.ui))
216 in extensions.extensions(self.ui))
217 for setupfunc in self.featuresetupfuncs:
217 for setupfunc in self.featuresetupfuncs:
218 if setupfunc.__module__ in extmods:
218 if setupfunc.__module__ in extmods:
219 setupfunc(self.ui, self.supported)
219 setupfunc(self.ui, self.supported)
220 else:
220 else:
221 self.supported = self._basesupported
221 self.supported = self._basesupported
222
222
223 if not self.vfs.isdir():
223 if not self.vfs.isdir():
224 if create:
224 if create:
225 if not self.wvfs.exists():
225 if not self.wvfs.exists():
226 self.wvfs.makedirs()
226 self.wvfs.makedirs()
227 self.vfs.makedir(notindexed=True)
227 self.vfs.makedir(notindexed=True)
228 requirements = self._baserequirements(create)
228 requirements = self._baserequirements(create)
229 if self.ui.configbool('format', 'usestore', True):
229 if self.ui.configbool('format', 'usestore', True):
230 self.vfs.mkdir("store")
230 self.vfs.mkdir("store")
231 requirements.append("store")
231 requirements.append("store")
232 if self.ui.configbool('format', 'usefncache', True):
232 if self.ui.configbool('format', 'usefncache', True):
233 requirements.append("fncache")
233 requirements.append("fncache")
234 if self.ui.configbool('format', 'dotencode', True):
234 if self.ui.configbool('format', 'dotencode', True):
235 requirements.append('dotencode')
235 requirements.append('dotencode')
236 # create an invalid changelog
236 # create an invalid changelog
237 self.vfs.append(
237 self.vfs.append(
238 "00changelog.i",
238 "00changelog.i",
239 '\0\0\0\2' # represents revlogv2
239 '\0\0\0\2' # represents revlogv2
240 ' dummy changelog to prevent using the old repo layout'
240 ' dummy changelog to prevent using the old repo layout'
241 )
241 )
242 if self.ui.configbool('format', 'generaldelta', False):
242 if self.ui.configbool('format', 'generaldelta', False):
243 requirements.append("generaldelta")
243 requirements.append("generaldelta")
244 if self.ui.configbool('experimental', 'manifestv2', False):
244 if self.ui.configbool('experimental', 'manifestv2', False):
245 requirements.append("manifestv2")
245 requirements.append("manifestv2")
246 requirements = set(requirements)
246 requirements = set(requirements)
247 else:
247 else:
248 raise error.RepoError(_("repository %s not found") % path)
248 raise error.RepoError(_("repository %s not found") % path)
249 elif create:
249 elif create:
250 raise error.RepoError(_("repository %s already exists") % path)
250 raise error.RepoError(_("repository %s already exists") % path)
251 else:
251 else:
252 try:
252 try:
253 requirements = scmutil.readrequires(self.vfs, self.supported)
253 requirements = scmutil.readrequires(self.vfs, self.supported)
254 except IOError, inst:
254 except IOError, inst:
255 if inst.errno != errno.ENOENT:
255 if inst.errno != errno.ENOENT:
256 raise
256 raise
257 requirements = set()
257 requirements = set()
258
258
259 self.sharedpath = self.path
259 self.sharedpath = self.path
260 try:
260 try:
261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
262 realpath=True)
262 realpath=True)
263 s = vfs.base
263 s = vfs.base
264 if not vfs.exists():
264 if not vfs.exists():
265 raise error.RepoError(
265 raise error.RepoError(
266 _('.hg/sharedpath points to nonexistent directory %s') % s)
266 _('.hg/sharedpath points to nonexistent directory %s') % s)
267 self.sharedpath = s
267 self.sharedpath = s
268 except IOError, inst:
268 except IOError, inst:
269 if inst.errno != errno.ENOENT:
269 if inst.errno != errno.ENOENT:
270 raise
270 raise
271
271
272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
273 self.spath = self.store.path
273 self.spath = self.store.path
274 self.svfs = self.store.vfs
274 self.svfs = self.store.vfs
275 self.sopener = self.svfs
275 self.sopener = self.svfs
276 self.sjoin = self.store.join
276 self.sjoin = self.store.join
277 self.vfs.createmode = self.store.createmode
277 self.vfs.createmode = self.store.createmode
278 self._applyrequirements(requirements)
278 self._applyrequirements(requirements)
279 if create:
279 if create:
280 self._writerequirements()
280 self._writerequirements()
281
281
282
282
283 self._branchcaches = {}
283 self._branchcaches = {}
284 self._revbranchcache = None
284 self._revbranchcache = None
285 self.filterpats = {}
285 self.filterpats = {}
286 self._datafilters = {}
286 self._datafilters = {}
287 self._transref = self._lockref = self._wlockref = None
287 self._transref = self._lockref = self._wlockref = None
288
288
289 # A cache for various files under .hg/ that tracks file changes,
289 # A cache for various files under .hg/ that tracks file changes,
290 # (used by the filecache decorator)
290 # (used by the filecache decorator)
291 #
291 #
292 # Maps a property name to its util.filecacheentry
292 # Maps a property name to its util.filecacheentry
293 self._filecache = {}
293 self._filecache = {}
294
294
295 # hold sets of revision to be filtered
295 # hold sets of revision to be filtered
296 # should be cleared when something might have changed the filter value:
296 # should be cleared when something might have changed the filter value:
297 # - new changesets,
297 # - new changesets,
298 # - phase change,
298 # - phase change,
299 # - new obsolescence marker,
299 # - new obsolescence marker,
300 # - working directory parent change,
300 # - working directory parent change,
301 # - bookmark changes
301 # - bookmark changes
302 self.filteredrevcache = {}
302 self.filteredrevcache = {}
303
303
304 # generic mapping between names and nodes
304 # generic mapping between names and nodes
305 self.names = namespaces.namespaces()
305 self.names = namespaces.namespaces()
306
306
307 def close(self):
307 def close(self):
308 self._writecaches()
308 self._writecaches()
309
309
310 def _writecaches(self):
310 def _writecaches(self):
311 if self._revbranchcache:
311 if self._revbranchcache:
312 self._revbranchcache.write()
312 self._revbranchcache.write()
313
313
314 def _restrictcapabilities(self, caps):
314 def _restrictcapabilities(self, caps):
315 if self.ui.configbool('experimental', 'bundle2-advertise', True):
315 if self.ui.configbool('experimental', 'bundle2-advertise', True):
316 caps = set(caps)
316 caps = set(caps)
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
318 caps.add('bundle2=' + urllib.quote(capsblob))
318 caps.add('bundle2=' + urllib.quote(capsblob))
319 return caps
319 return caps
320
320
321 def _applyrequirements(self, requirements):
321 def _applyrequirements(self, requirements):
322 self.requirements = requirements
322 self.requirements = requirements
323 self.svfs.options = dict((r, 1) for r in requirements
323 self.svfs.options = dict((r, 1) for r in requirements
324 if r in self.openerreqs)
324 if r in self.openerreqs)
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
326 if chunkcachesize is not None:
326 if chunkcachesize is not None:
327 self.svfs.options['chunkcachesize'] = chunkcachesize
327 self.svfs.options['chunkcachesize'] = chunkcachesize
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
328 maxchainlen = self.ui.configint('format', 'maxchainlen')
329 if maxchainlen is not None:
329 if maxchainlen is not None:
330 self.svfs.options['maxchainlen'] = maxchainlen
330 self.svfs.options['maxchainlen'] = maxchainlen
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
332 if manifestcachesize is not None:
332 if manifestcachesize is not None:
333 self.svfs.options['manifestcachesize'] = manifestcachesize
333 self.svfs.options['manifestcachesize'] = manifestcachesize
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
335 if usetreemanifest is not None:
335 if usetreemanifest is not None:
336 self.svfs.options['usetreemanifest'] = usetreemanifest
336 self.svfs.options['usetreemanifest'] = usetreemanifest
337
337
338 def _writerequirements(self):
338 def _writerequirements(self):
339 reqfile = self.vfs("requires", "w")
339 reqfile = self.vfs("requires", "w")
340 for r in sorted(self.requirements):
340 for r in sorted(self.requirements):
341 reqfile.write("%s\n" % r)
341 reqfile.write("%s\n" % r)
342 reqfile.close()
342 reqfile.close()
343
343
344 def _checknested(self, path):
344 def _checknested(self, path):
345 """Determine if path is a legal nested repository."""
345 """Determine if path is a legal nested repository."""
346 if not path.startswith(self.root):
346 if not path.startswith(self.root):
347 return False
347 return False
348 subpath = path[len(self.root) + 1:]
348 subpath = path[len(self.root) + 1:]
349 normsubpath = util.pconvert(subpath)
349 normsubpath = util.pconvert(subpath)
350
350
351 # XXX: Checking against the current working copy is wrong in
351 # XXX: Checking against the current working copy is wrong in
352 # the sense that it can reject things like
352 # the sense that it can reject things like
353 #
353 #
354 # $ hg cat -r 10 sub/x.txt
354 # $ hg cat -r 10 sub/x.txt
355 #
355 #
356 # if sub/ is no longer a subrepository in the working copy
356 # if sub/ is no longer a subrepository in the working copy
357 # parent revision.
357 # parent revision.
358 #
358 #
359 # However, it can of course also allow things that would have
359 # However, it can of course also allow things that would have
360 # been rejected before, such as the above cat command if sub/
360 # been rejected before, such as the above cat command if sub/
361 # is a subrepository now, but was a normal directory before.
361 # is a subrepository now, but was a normal directory before.
362 # The old path auditor would have rejected by mistake since it
362 # The old path auditor would have rejected by mistake since it
363 # panics when it sees sub/.hg/.
363 # panics when it sees sub/.hg/.
364 #
364 #
365 # All in all, checking against the working copy seems sensible
365 # All in all, checking against the working copy seems sensible
366 # since we want to prevent access to nested repositories on
366 # since we want to prevent access to nested repositories on
367 # the filesystem *now*.
367 # the filesystem *now*.
368 ctx = self[None]
368 ctx = self[None]
369 parts = util.splitpath(subpath)
369 parts = util.splitpath(subpath)
370 while parts:
370 while parts:
371 prefix = '/'.join(parts)
371 prefix = '/'.join(parts)
372 if prefix in ctx.substate:
372 if prefix in ctx.substate:
373 if prefix == normsubpath:
373 if prefix == normsubpath:
374 return True
374 return True
375 else:
375 else:
376 sub = ctx.sub(prefix)
376 sub = ctx.sub(prefix)
377 return sub.checknested(subpath[len(prefix) + 1:])
377 return sub.checknested(subpath[len(prefix) + 1:])
378 else:
378 else:
379 parts.pop()
379 parts.pop()
380 return False
380 return False
381
381
382 def peer(self):
382 def peer(self):
383 return localpeer(self) # not cached to avoid reference cycle
383 return localpeer(self) # not cached to avoid reference cycle
384
384
385 def unfiltered(self):
385 def unfiltered(self):
386 """Return unfiltered version of the repository
386 """Return unfiltered version of the repository
387
387
388 Intended to be overwritten by filtered repo."""
388 Intended to be overwritten by filtered repo."""
389 return self
389 return self
390
390
391 def filtered(self, name):
391 def filtered(self, name):
392 """Return a filtered version of a repository"""
392 """Return a filtered version of a repository"""
393 # build a new class with the mixin and the current class
393 # build a new class with the mixin and the current class
394 # (possibly subclass of the repo)
394 # (possibly subclass of the repo)
395 class proxycls(repoview.repoview, self.unfiltered().__class__):
395 class proxycls(repoview.repoview, self.unfiltered().__class__):
396 pass
396 pass
397 return proxycls(self, name)
397 return proxycls(self, name)
398
398
399 @repofilecache('bookmarks')
399 @repofilecache('bookmarks')
400 def _bookmarks(self):
400 def _bookmarks(self):
401 return bookmarks.bmstore(self)
401 return bookmarks.bmstore(self)
402
402
403 @repofilecache('bookmarks.current')
403 @repofilecache('bookmarks.current')
404 def _bookmarkcurrent(self):
404 def _bookmarkcurrent(self):
405 return bookmarks.readcurrent(self)
405 return bookmarks.readcurrent(self)
406
406
407 def bookmarkheads(self, bookmark):
407 def bookmarkheads(self, bookmark):
408 name = bookmark.split('@', 1)[0]
408 name = bookmark.split('@', 1)[0]
409 heads = []
409 heads = []
410 for mark, n in self._bookmarks.iteritems():
410 for mark, n in self._bookmarks.iteritems():
411 if mark.split('@', 1)[0] == name:
411 if mark.split('@', 1)[0] == name:
412 heads.append(n)
412 heads.append(n)
413 return heads
413 return heads
414
414
415 @storecache('phaseroots')
415 @storecache('phaseroots')
416 def _phasecache(self):
416 def _phasecache(self):
417 return phases.phasecache(self, self._phasedefaults)
417 return phases.phasecache(self, self._phasedefaults)
418
418
419 @storecache('obsstore')
419 @storecache('obsstore')
420 def obsstore(self):
420 def obsstore(self):
421 # read default format for new obsstore.
421 # read default format for new obsstore.
422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
423 # rely on obsstore class default when possible.
423 # rely on obsstore class default when possible.
424 kwargs = {}
424 kwargs = {}
425 if defaultformat is not None:
425 if defaultformat is not None:
426 kwargs['defaultformat'] = defaultformat
426 kwargs['defaultformat'] = defaultformat
427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
428 store = obsolete.obsstore(self.svfs, readonly=readonly,
428 store = obsolete.obsstore(self.svfs, readonly=readonly,
429 **kwargs)
429 **kwargs)
430 if store and readonly:
430 if store and readonly:
431 self.ui.warn(
431 self.ui.warn(
432 _('obsolete feature not enabled but %i markers found!\n')
432 _('obsolete feature not enabled but %i markers found!\n')
433 % len(list(store)))
433 % len(list(store)))
434 return store
434 return store
435
435
436 @storecache('00changelog.i')
436 @storecache('00changelog.i')
437 def changelog(self):
437 def changelog(self):
438 c = changelog.changelog(self.svfs)
438 c = changelog.changelog(self.svfs)
439 if 'HG_PENDING' in os.environ:
439 if 'HG_PENDING' in os.environ:
440 p = os.environ['HG_PENDING']
440 p = os.environ['HG_PENDING']
441 if p.startswith(self.root):
441 if p.startswith(self.root):
442 c.readpending('00changelog.i.a')
442 c.readpending('00changelog.i.a')
443 return c
443 return c
444
444
445 @storecache('00manifest.i')
445 @storecache('00manifest.i')
446 def manifest(self):
446 def manifest(self):
447 return manifest.manifest(self.svfs)
447 return manifest.manifest(self.svfs)
448
448
449 @repofilecache('dirstate')
449 @repofilecache('dirstate')
450 def dirstate(self):
450 def dirstate(self):
451 warned = [0]
451 warned = [0]
452 def validate(node):
452 def validate(node):
453 try:
453 try:
454 self.changelog.rev(node)
454 self.changelog.rev(node)
455 return node
455 return node
456 except error.LookupError:
456 except error.LookupError:
457 if not warned[0]:
457 if not warned[0]:
458 warned[0] = True
458 warned[0] = True
459 self.ui.warn(_("warning: ignoring unknown"
459 self.ui.warn(_("warning: ignoring unknown"
460 " working parent %s!\n") % short(node))
460 " working parent %s!\n") % short(node))
461 return nullid
461 return nullid
462
462
463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
464
464
465 def __getitem__(self, changeid):
465 def __getitem__(self, changeid):
466 if changeid is None:
466 if changeid is None:
467 return context.workingctx(self)
467 return context.workingctx(self)
468 if isinstance(changeid, slice):
468 if isinstance(changeid, slice):
469 return [context.changectx(self, i)
469 return [context.changectx(self, i)
470 for i in xrange(*changeid.indices(len(self)))
470 for i in xrange(*changeid.indices(len(self)))
471 if i not in self.changelog.filteredrevs]
471 if i not in self.changelog.filteredrevs]
472 return context.changectx(self, changeid)
472 return context.changectx(self, changeid)
473
473
474 def __contains__(self, changeid):
474 def __contains__(self, changeid):
475 try:
475 try:
476 self[changeid]
476 self[changeid]
477 return True
477 return True
478 except error.RepoLookupError:
478 except error.RepoLookupError:
479 return False
479 return False
480
480
481 def __nonzero__(self):
481 def __nonzero__(self):
482 return True
482 return True
483
483
484 def __len__(self):
484 def __len__(self):
485 return len(self.changelog)
485 return len(self.changelog)
486
486
487 def __iter__(self):
487 def __iter__(self):
488 return iter(self.changelog)
488 return iter(self.changelog)
489
489
490 def revs(self, expr, *args):
490 def revs(self, expr, *args):
491 '''Return a list of revisions matching the given revset'''
491 '''Return a list of revisions matching the given revset'''
492 expr = revset.formatspec(expr, *args)
492 expr = revset.formatspec(expr, *args)
493 m = revset.match(None, expr)
493 m = revset.match(None, expr)
494 return m(self)
494 return m(self)
495
495
496 def set(self, expr, *args):
496 def set(self, expr, *args):
497 '''
497 '''
498 Yield a context for each matching revision, after doing arg
498 Yield a context for each matching revision, after doing arg
499 replacement via revset.formatspec
499 replacement via revset.formatspec
500 '''
500 '''
501 for r in self.revs(expr, *args):
501 for r in self.revs(expr, *args):
502 yield self[r]
502 yield self[r]
503
503
504 def url(self):
504 def url(self):
505 return 'file:' + self.root
505 return 'file:' + self.root
506
506
507 def hook(self, name, throw=False, **args):
507 def hook(self, name, throw=False, **args):
508 """Call a hook, passing this repo instance.
508 """Call a hook, passing this repo instance.
509
509
510 This a convenience method to aid invoking hooks. Extensions likely
510 This a convenience method to aid invoking hooks. Extensions likely
511 won't call this unless they have registered a custom hook or are
511 won't call this unless they have registered a custom hook or are
512 replacing code that is expected to call a hook.
512 replacing code that is expected to call a hook.
513 """
513 """
514 return hook.hook(self.ui, self, name, throw, **args)
514 return hook.hook(self.ui, self, name, throw, **args)
515
515
516 @unfilteredmethod
516 @unfilteredmethod
517 def _tag(self, names, node, message, local, user, date, extra={},
517 def _tag(self, names, node, message, local, user, date, extra={},
518 editor=False):
518 editor=False):
519 if isinstance(names, str):
519 if isinstance(names, str):
520 names = (names,)
520 names = (names,)
521
521
522 branches = self.branchmap()
522 branches = self.branchmap()
523 for name in names:
523 for name in names:
524 self.hook('pretag', throw=True, node=hex(node), tag=name,
524 self.hook('pretag', throw=True, node=hex(node), tag=name,
525 local=local)
525 local=local)
526 if name in branches:
526 if name in branches:
527 self.ui.warn(_("warning: tag %s conflicts with existing"
527 self.ui.warn(_("warning: tag %s conflicts with existing"
528 " branch name\n") % name)
528 " branch name\n") % name)
529
529
530 def writetags(fp, names, munge, prevtags):
530 def writetags(fp, names, munge, prevtags):
531 fp.seek(0, 2)
531 fp.seek(0, 2)
532 if prevtags and prevtags[-1] != '\n':
532 if prevtags and prevtags[-1] != '\n':
533 fp.write('\n')
533 fp.write('\n')
534 for name in names:
534 for name in names:
535 if munge:
535 if munge:
536 m = munge(name)
536 m = munge(name)
537 else:
537 else:
538 m = name
538 m = name
539
539
540 if (self._tagscache.tagtypes and
540 if (self._tagscache.tagtypes and
541 name in self._tagscache.tagtypes):
541 name in self._tagscache.tagtypes):
542 old = self.tags().get(name, nullid)
542 old = self.tags().get(name, nullid)
543 fp.write('%s %s\n' % (hex(old), m))
543 fp.write('%s %s\n' % (hex(old), m))
544 fp.write('%s %s\n' % (hex(node), m))
544 fp.write('%s %s\n' % (hex(node), m))
545 fp.close()
545 fp.close()
546
546
547 prevtags = ''
547 prevtags = ''
548 if local:
548 if local:
549 try:
549 try:
550 fp = self.vfs('localtags', 'r+')
550 fp = self.vfs('localtags', 'r+')
551 except IOError:
551 except IOError:
552 fp = self.vfs('localtags', 'a')
552 fp = self.vfs('localtags', 'a')
553 else:
553 else:
554 prevtags = fp.read()
554 prevtags = fp.read()
555
555
556 # local tags are stored in the current charset
556 # local tags are stored in the current charset
557 writetags(fp, names, None, prevtags)
557 writetags(fp, names, None, prevtags)
558 for name in names:
558 for name in names:
559 self.hook('tag', node=hex(node), tag=name, local=local)
559 self.hook('tag', node=hex(node), tag=name, local=local)
560 return
560 return
561
561
562 try:
562 try:
563 fp = self.wfile('.hgtags', 'rb+')
563 fp = self.wfile('.hgtags', 'rb+')
564 except IOError, e:
564 except IOError, e:
565 if e.errno != errno.ENOENT:
565 if e.errno != errno.ENOENT:
566 raise
566 raise
567 fp = self.wfile('.hgtags', 'ab')
567 fp = self.wfile('.hgtags', 'ab')
568 else:
568 else:
569 prevtags = fp.read()
569 prevtags = fp.read()
570
570
571 # committed tags are stored in UTF-8
571 # committed tags are stored in UTF-8
572 writetags(fp, names, encoding.fromlocal, prevtags)
572 writetags(fp, names, encoding.fromlocal, prevtags)
573
573
574 fp.close()
574 fp.close()
575
575
576 self.invalidatecaches()
576 self.invalidatecaches()
577
577
578 if '.hgtags' not in self.dirstate:
578 if '.hgtags' not in self.dirstate:
579 self[None].add(['.hgtags'])
579 self[None].add(['.hgtags'])
580
580
581 m = matchmod.exact(self.root, '', ['.hgtags'])
581 m = matchmod.exact(self.root, '', ['.hgtags'])
582 tagnode = self.commit(message, user, date, extra=extra, match=m,
582 tagnode = self.commit(message, user, date, extra=extra, match=m,
583 editor=editor)
583 editor=editor)
584
584
585 for name in names:
585 for name in names:
586 self.hook('tag', node=hex(node), tag=name, local=local)
586 self.hook('tag', node=hex(node), tag=name, local=local)
587
587
588 return tagnode
588 return tagnode
589
589
590 def tag(self, names, node, message, local, user, date, editor=False):
590 def tag(self, names, node, message, local, user, date, editor=False):
591 '''tag a revision with one or more symbolic names.
591 '''tag a revision with one or more symbolic names.
592
592
593 names is a list of strings or, when adding a single tag, names may be a
593 names is a list of strings or, when adding a single tag, names may be a
594 string.
594 string.
595
595
596 if local is True, the tags are stored in a per-repository file.
596 if local is True, the tags are stored in a per-repository file.
597 otherwise, they are stored in the .hgtags file, and a new
597 otherwise, they are stored in the .hgtags file, and a new
598 changeset is committed with the change.
598 changeset is committed with the change.
599
599
600 keyword arguments:
600 keyword arguments:
601
601
602 local: whether to store tags in non-version-controlled file
602 local: whether to store tags in non-version-controlled file
603 (default False)
603 (default False)
604
604
605 message: commit message to use if committing
605 message: commit message to use if committing
606
606
607 user: name of user to use if committing
607 user: name of user to use if committing
608
608
609 date: date tuple to use if committing'''
609 date: date tuple to use if committing'''
610
610
611 if not local:
611 if not local:
612 m = matchmod.exact(self.root, '', ['.hgtags'])
612 m = matchmod.exact(self.root, '', ['.hgtags'])
613 if util.any(self.status(match=m, unknown=True, ignored=True)):
613 if util.any(self.status(match=m, unknown=True, ignored=True)):
614 raise util.Abort(_('working copy of .hgtags is changed'),
614 raise util.Abort(_('working copy of .hgtags is changed'),
615 hint=_('please commit .hgtags manually'))
615 hint=_('please commit .hgtags manually'))
616
616
617 self.tags() # instantiate the cache
617 self.tags() # instantiate the cache
618 self._tag(names, node, message, local, user, date, editor=editor)
618 self._tag(names, node, message, local, user, date, editor=editor)
619
619
620 @filteredpropertycache
620 @filteredpropertycache
621 def _tagscache(self):
621 def _tagscache(self):
622 '''Returns a tagscache object that contains various tags related
622 '''Returns a tagscache object that contains various tags related
623 caches.'''
623 caches.'''
624
624
625 # This simplifies its cache management by having one decorated
625 # This simplifies its cache management by having one decorated
626 # function (this one) and the rest simply fetch things from it.
626 # function (this one) and the rest simply fetch things from it.
627 class tagscache(object):
627 class tagscache(object):
628 def __init__(self):
628 def __init__(self):
629 # These two define the set of tags for this repository. tags
629 # These two define the set of tags for this repository. tags
630 # maps tag name to node; tagtypes maps tag name to 'global' or
630 # maps tag name to node; tagtypes maps tag name to 'global' or
631 # 'local'. (Global tags are defined by .hgtags across all
631 # 'local'. (Global tags are defined by .hgtags across all
632 # heads, and local tags are defined in .hg/localtags.)
632 # heads, and local tags are defined in .hg/localtags.)
633 # They constitute the in-memory cache of tags.
633 # They constitute the in-memory cache of tags.
634 self.tags = self.tagtypes = None
634 self.tags = self.tagtypes = None
635
635
636 self.nodetagscache = self.tagslist = None
636 self.nodetagscache = self.tagslist = None
637
637
638 cache = tagscache()
638 cache = tagscache()
639 cache.tags, cache.tagtypes = self._findtags()
639 cache.tags, cache.tagtypes = self._findtags()
640
640
641 return cache
641 return cache
642
642
643 def tags(self):
643 def tags(self):
644 '''return a mapping of tag to node'''
644 '''return a mapping of tag to node'''
645 t = {}
645 t = {}
646 if self.changelog.filteredrevs:
646 if self.changelog.filteredrevs:
647 tags, tt = self._findtags()
647 tags, tt = self._findtags()
648 else:
648 else:
649 tags = self._tagscache.tags
649 tags = self._tagscache.tags
650 for k, v in tags.iteritems():
650 for k, v in tags.iteritems():
651 try:
651 try:
652 # ignore tags to unknown nodes
652 # ignore tags to unknown nodes
653 self.changelog.rev(v)
653 self.changelog.rev(v)
654 t[k] = v
654 t[k] = v
655 except (error.LookupError, ValueError):
655 except (error.LookupError, ValueError):
656 pass
656 pass
657 return t
657 return t
658
658
659 def _findtags(self):
659 def _findtags(self):
660 '''Do the hard work of finding tags. Return a pair of dicts
660 '''Do the hard work of finding tags. Return a pair of dicts
661 (tags, tagtypes) where tags maps tag name to node, and tagtypes
661 (tags, tagtypes) where tags maps tag name to node, and tagtypes
662 maps tag name to a string like \'global\' or \'local\'.
662 maps tag name to a string like \'global\' or \'local\'.
663 Subclasses or extensions are free to add their own tags, but
663 Subclasses or extensions are free to add their own tags, but
664 should be aware that the returned dicts will be retained for the
664 should be aware that the returned dicts will be retained for the
665 duration of the localrepo object.'''
665 duration of the localrepo object.'''
666
666
667 # XXX what tagtype should subclasses/extensions use? Currently
667 # XXX what tagtype should subclasses/extensions use? Currently
668 # mq and bookmarks add tags, but do not set the tagtype at all.
668 # mq and bookmarks add tags, but do not set the tagtype at all.
669 # Should each extension invent its own tag type? Should there
669 # Should each extension invent its own tag type? Should there
670 # be one tagtype for all such "virtual" tags? Or is the status
670 # be one tagtype for all such "virtual" tags? Or is the status
671 # quo fine?
671 # quo fine?
672
672
673 alltags = {} # map tag name to (node, hist)
673 alltags = {} # map tag name to (node, hist)
674 tagtypes = {}
674 tagtypes = {}
675
675
676 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
676 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
677 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
677 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
678
678
679 # Build the return dicts. Have to re-encode tag names because
679 # Build the return dicts. Have to re-encode tag names because
680 # the tags module always uses UTF-8 (in order not to lose info
680 # the tags module always uses UTF-8 (in order not to lose info
681 # writing to the cache), but the rest of Mercurial wants them in
681 # writing to the cache), but the rest of Mercurial wants them in
682 # local encoding.
682 # local encoding.
683 tags = {}
683 tags = {}
684 for (name, (node, hist)) in alltags.iteritems():
684 for (name, (node, hist)) in alltags.iteritems():
685 if node != nullid:
685 if node != nullid:
686 tags[encoding.tolocal(name)] = node
686 tags[encoding.tolocal(name)] = node
687 tags['tip'] = self.changelog.tip()
687 tags['tip'] = self.changelog.tip()
688 tagtypes = dict([(encoding.tolocal(name), value)
688 tagtypes = dict([(encoding.tolocal(name), value)
689 for (name, value) in tagtypes.iteritems()])
689 for (name, value) in tagtypes.iteritems()])
690 return (tags, tagtypes)
690 return (tags, tagtypes)
691
691
692 def tagtype(self, tagname):
692 def tagtype(self, tagname):
693 '''
693 '''
694 return the type of the given tag. result can be:
694 return the type of the given tag. result can be:
695
695
696 'local' : a local tag
696 'local' : a local tag
697 'global' : a global tag
697 'global' : a global tag
698 None : tag does not exist
698 None : tag does not exist
699 '''
699 '''
700
700
701 return self._tagscache.tagtypes.get(tagname)
701 return self._tagscache.tagtypes.get(tagname)
702
702
703 def tagslist(self):
703 def tagslist(self):
704 '''return a list of tags ordered by revision'''
704 '''return a list of tags ordered by revision'''
705 if not self._tagscache.tagslist:
705 if not self._tagscache.tagslist:
706 l = []
706 l = []
707 for t, n in self.tags().iteritems():
707 for t, n in self.tags().iteritems():
708 l.append((self.changelog.rev(n), t, n))
708 l.append((self.changelog.rev(n), t, n))
709 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
709 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
710
710
711 return self._tagscache.tagslist
711 return self._tagscache.tagslist
712
712
713 def nodetags(self, node):
713 def nodetags(self, node):
714 '''return the tags associated with a node'''
714 '''return the tags associated with a node'''
715 if not self._tagscache.nodetagscache:
715 if not self._tagscache.nodetagscache:
716 nodetagscache = {}
716 nodetagscache = {}
717 for t, n in self._tagscache.tags.iteritems():
717 for t, n in self._tagscache.tags.iteritems():
718 nodetagscache.setdefault(n, []).append(t)
718 nodetagscache.setdefault(n, []).append(t)
719 for tags in nodetagscache.itervalues():
719 for tags in nodetagscache.itervalues():
720 tags.sort()
720 tags.sort()
721 self._tagscache.nodetagscache = nodetagscache
721 self._tagscache.nodetagscache = nodetagscache
722 return self._tagscache.nodetagscache.get(node, [])
722 return self._tagscache.nodetagscache.get(node, [])
723
723
724 def nodebookmarks(self, node):
724 def nodebookmarks(self, node):
725 marks = []
725 marks = []
726 for bookmark, n in self._bookmarks.iteritems():
726 for bookmark, n in self._bookmarks.iteritems():
727 if n == node:
727 if n == node:
728 marks.append(bookmark)
728 marks.append(bookmark)
729 return sorted(marks)
729 return sorted(marks)
730
730
731 def branchmap(self):
731 def branchmap(self):
732 '''returns a dictionary {branch: [branchheads]} with branchheads
732 '''returns a dictionary {branch: [branchheads]} with branchheads
733 ordered by increasing revision number'''
733 ordered by increasing revision number'''
734 branchmap.updatecache(self)
734 branchmap.updatecache(self)
735 return self._branchcaches[self.filtername]
735 return self._branchcaches[self.filtername]
736
736
737 @unfilteredmethod
737 @unfilteredmethod
738 def revbranchcache(self):
738 def revbranchcache(self):
739 if not self._revbranchcache:
739 if not self._revbranchcache:
740 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
740 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
741 return self._revbranchcache
741 return self._revbranchcache
742
742
743 def branchtip(self, branch, ignoremissing=False):
743 def branchtip(self, branch, ignoremissing=False):
744 '''return the tip node for a given branch
744 '''return the tip node for a given branch
745
745
746 If ignoremissing is True, then this method will not raise an error.
746 If ignoremissing is True, then this method will not raise an error.
747 This is helpful for callers that only expect None for a missing branch
747 This is helpful for callers that only expect None for a missing branch
748 (e.g. namespace).
748 (e.g. namespace).
749
749
750 '''
750 '''
751 try:
751 try:
752 return self.branchmap().branchtip(branch)
752 return self.branchmap().branchtip(branch)
753 except KeyError:
753 except KeyError:
754 if not ignoremissing:
754 if not ignoremissing:
755 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
755 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
756 else:
756 else:
757 pass
757 pass
758
758
759 def lookup(self, key):
759 def lookup(self, key):
760 return self[key].node()
760 return self[key].node()
761
761
762 def lookupbranch(self, key, remote=None):
762 def lookupbranch(self, key, remote=None):
763 repo = remote or self
763 repo = remote or self
764 if key in repo.branchmap():
764 if key in repo.branchmap():
765 return key
765 return key
766
766
767 repo = (remote and remote.local()) and remote or self
767 repo = (remote and remote.local()) and remote or self
768 return repo[key].branch()
768 return repo[key].branch()
769
769
770 def known(self, nodes):
770 def known(self, nodes):
771 nm = self.changelog.nodemap
771 nm = self.changelog.nodemap
772 pc = self._phasecache
772 pc = self._phasecache
773 result = []
773 result = []
774 for n in nodes:
774 for n in nodes:
775 r = nm.get(n)
775 r = nm.get(n)
776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
777 result.append(resp)
777 result.append(resp)
778 return result
778 return result
779
779
780 def local(self):
780 def local(self):
781 return self
781 return self
782
782
783 def cancopy(self):
783 def cancopy(self):
784 # so statichttprepo's override of local() works
784 # so statichttprepo's override of local() works
785 if not self.local():
785 if not self.local():
786 return False
786 return False
787 if not self.ui.configbool('phases', 'publish', True):
787 if not self.ui.configbool('phases', 'publish', True):
788 return True
788 return True
789 # if publishing we can't copy if there is filtered content
789 # if publishing we can't copy if there is filtered content
790 return not self.filtered('visible').changelog.filteredrevs
790 return not self.filtered('visible').changelog.filteredrevs
791
791
792 def shared(self):
792 def shared(self):
793 '''the type of shared repository (None if not shared)'''
793 '''the type of shared repository (None if not shared)'''
794 if self.sharedpath != self.path:
794 if self.sharedpath != self.path:
795 return 'store'
795 return 'store'
796 return None
796 return None
797
797
798 def join(self, f, *insidef):
798 def join(self, f, *insidef):
799 return self.vfs.join(os.path.join(f, *insidef))
799 return self.vfs.join(os.path.join(f, *insidef))
800
800
801 def wjoin(self, f, *insidef):
801 def wjoin(self, f, *insidef):
802 return self.vfs.reljoin(self.root, f, *insidef)
802 return self.vfs.reljoin(self.root, f, *insidef)
803
803
804 def file(self, f):
804 def file(self, f):
805 if f[0] == '/':
805 if f[0] == '/':
806 f = f[1:]
806 f = f[1:]
807 return filelog.filelog(self.svfs, f)
807 return filelog.filelog(self.svfs, f)
808
808
809 def changectx(self, changeid):
809 def changectx(self, changeid):
810 return self[changeid]
810 return self[changeid]
811
811
812 def parents(self, changeid=None):
812 def parents(self, changeid=None):
813 '''get list of changectxs for parents of changeid'''
813 '''get list of changectxs for parents of changeid'''
814 return self[changeid].parents()
814 return self[changeid].parents()
815
815
816 def setparents(self, p1, p2=nullid):
816 def setparents(self, p1, p2=nullid):
817 self.dirstate.beginparentchange()
817 self.dirstate.beginparentchange()
818 copies = self.dirstate.setparents(p1, p2)
818 copies = self.dirstate.setparents(p1, p2)
819 pctx = self[p1]
819 pctx = self[p1]
820 if copies:
820 if copies:
821 # Adjust copy records, the dirstate cannot do it, it
821 # Adjust copy records, the dirstate cannot do it, it
822 # requires access to parents manifests. Preserve them
822 # requires access to parents manifests. Preserve them
823 # only for entries added to first parent.
823 # only for entries added to first parent.
824 for f in copies:
824 for f in copies:
825 if f not in pctx and copies[f] in pctx:
825 if f not in pctx and copies[f] in pctx:
826 self.dirstate.copy(copies[f], f)
826 self.dirstate.copy(copies[f], f)
827 if p2 == nullid:
827 if p2 == nullid:
828 for f, s in sorted(self.dirstate.copies().items()):
828 for f, s in sorted(self.dirstate.copies().items()):
829 if f not in pctx and s not in pctx:
829 if f not in pctx and s not in pctx:
830 self.dirstate.copy(None, f)
830 self.dirstate.copy(None, f)
831 self.dirstate.endparentchange()
831 self.dirstate.endparentchange()
832
832
833 def filectx(self, path, changeid=None, fileid=None):
833 def filectx(self, path, changeid=None, fileid=None):
834 """changeid can be a changeset revision, node, or tag.
834 """changeid can be a changeset revision, node, or tag.
835 fileid can be a file revision or node."""
835 fileid can be a file revision or node."""
836 return context.filectx(self, path, changeid, fileid)
836 return context.filectx(self, path, changeid, fileid)
837
837
838 def getcwd(self):
838 def getcwd(self):
839 return self.dirstate.getcwd()
839 return self.dirstate.getcwd()
840
840
841 def pathto(self, f, cwd=None):
841 def pathto(self, f, cwd=None):
842 return self.dirstate.pathto(f, cwd)
842 return self.dirstate.pathto(f, cwd)
843
843
844 def wfile(self, f, mode='r'):
844 def wfile(self, f, mode='r'):
845 return self.wvfs(f, mode)
845 return self.wvfs(f, mode)
846
846
847 def _link(self, f):
847 def _link(self, f):
848 return self.wvfs.islink(f)
848 return self.wvfs.islink(f)
849
849
850 def _loadfilter(self, filter):
850 def _loadfilter(self, filter):
851 if filter not in self.filterpats:
851 if filter not in self.filterpats:
852 l = []
852 l = []
853 for pat, cmd in self.ui.configitems(filter):
853 for pat, cmd in self.ui.configitems(filter):
854 if cmd == '!':
854 if cmd == '!':
855 continue
855 continue
856 mf = matchmod.match(self.root, '', [pat])
856 mf = matchmod.match(self.root, '', [pat])
857 fn = None
857 fn = None
858 params = cmd
858 params = cmd
859 for name, filterfn in self._datafilters.iteritems():
859 for name, filterfn in self._datafilters.iteritems():
860 if cmd.startswith(name):
860 if cmd.startswith(name):
861 fn = filterfn
861 fn = filterfn
862 params = cmd[len(name):].lstrip()
862 params = cmd[len(name):].lstrip()
863 break
863 break
864 if not fn:
864 if not fn:
865 fn = lambda s, c, **kwargs: util.filter(s, c)
865 fn = lambda s, c, **kwargs: util.filter(s, c)
866 # Wrap old filters not supporting keyword arguments
866 # Wrap old filters not supporting keyword arguments
867 if not inspect.getargspec(fn)[2]:
867 if not inspect.getargspec(fn)[2]:
868 oldfn = fn
868 oldfn = fn
869 fn = lambda s, c, **kwargs: oldfn(s, c)
869 fn = lambda s, c, **kwargs: oldfn(s, c)
870 l.append((mf, fn, params))
870 l.append((mf, fn, params))
871 self.filterpats[filter] = l
871 self.filterpats[filter] = l
872 return self.filterpats[filter]
872 return self.filterpats[filter]
873
873
874 def _filter(self, filterpats, filename, data):
874 def _filter(self, filterpats, filename, data):
875 for mf, fn, cmd in filterpats:
875 for mf, fn, cmd in filterpats:
876 if mf(filename):
876 if mf(filename):
877 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
877 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
878 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
878 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
879 break
879 break
880
880
881 return data
881 return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        self._transref = weakref.ref(tr)
        return tr
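
    # Callers pair transaction() with lock() and use the close/release
    # protocol, as commitctx() below does. A minimal usage sketch:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction("my-operation")
    #         try:
    #             ...             # mutate the store
    #             tr.close()      # commit the transaction
    #         finally:
    #             tr.release()    # rolls back unless close() was reached
    #     finally:
    #         lock.release()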

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
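
    # Journal/undo lifecycle (summary): transaction() writes the journal.*
    # files listed in _journalfiles(); on a successful close, aftertrans()
    # renames them to the undo.* names returned by undofiles(), which
    # _rollback() below consumes. recover() instead replays a journal left
    # behind by an interrupted transaction.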

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
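
    # Lock-ordering sketch: callers needing both locks take 'wlock' before
    # 'lock', as rollback() above does:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...             # modify store and working copy
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()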

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time this hook is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
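
    # Illustrative call (a sketch; the message and user are hypothetical):
    #
    #     node = repo.commit(text="fix frobnication",
    #                        user="Jane Doe <jane@example.com>")
    #     if node is None:
    #         repo.ui.status("nothing changed\n")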

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
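
    # between() samples the first-parent chain from 'top' toward 'bottom' at
    # exponentially growing distances: with i counting steps and f doubling
    # every time a node is recorded, the nodes kept are 1, 2, 4, 8, ... steps
    # below 'top'. This appears intended to keep replies small for the legacy
    # discovery protocol no matter how long the chain is.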

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
1719
1719
1720 def stream_in(self, remote, requirements):
1720 def stream_in(self, remote, requirements):
1721 lock = self.lock()
1721 lock = self.lock()
1722 try:
1722 try:
1723 # Save remote branchmap. We will use it later
1723 # Save remote branchmap. We will use it later
1724 # to speed up branchcache creation
1724 # to speed up branchcache creation
1725 rbranchmap = None
1725 rbranchmap = None
1726 if remote.capable("branchmap"):
1726 if remote.capable("branchmap"):
1727 rbranchmap = remote.branchmap()
1727 rbranchmap = remote.branchmap()
1728
1728
1729 fp = remote.stream_out()
1729 fp = remote.stream_out()
1730 l = fp.readline()
1730 l = fp.readline()
1731 try:
1731 try:
1732 resp = int(l)
1732 resp = int(l)
1733 except ValueError:
1733 except ValueError:
1734 raise error.ResponseError(
1734 raise error.ResponseError(
1735 _('unexpected response from remote server:'), l)
1735 _('unexpected response from remote server:'), l)
1736 if resp == 1:
1736 if resp == 1:
1737 raise util.Abort(_('operation forbidden by server'))
1737 raise util.Abort(_('operation forbidden by server'))
1738 elif resp == 2:
1738 elif resp == 2:
1739 raise util.Abort(_('locking the remote repository failed'))
1739 raise util.Abort(_('locking the remote repository failed'))
1740 elif resp != 0:
1740 elif resp != 0:
1741 raise util.Abort(_('the server sent an unknown error code'))
1741 raise util.Abort(_('the server sent an unknown error code'))
1742 self.ui.status(_('streaming all changes\n'))
1742 self.ui.status(_('streaming all changes\n'))
1743 l = fp.readline()
1743 l = fp.readline()
1744 try:
1744 try:
1745 total_files, total_bytes = map(int, l.split(' ', 1))
1745 total_files, total_bytes = map(int, l.split(' ', 1))
1746 except (ValueError, TypeError):
1746 except (ValueError, TypeError):
1747 raise error.ResponseError(
1747 raise error.ResponseError(
1748 _('unexpected response from remote server:'), l)
1748 _('unexpected response from remote server:'), l)
1749 self.ui.status(_('%d files to transfer, %s of data\n') %
1749 self.ui.status(_('%d files to transfer, %s of data\n') %
1750 (total_files, util.bytecount(total_bytes)))
1750 (total_files, util.bytecount(total_bytes)))
1751 handled_bytes = 0
1751 handled_bytes = 0
1752 self.ui.progress(_('clone'), 0, total=total_bytes)
1752 self.ui.progress(_('clone'), 0, total=total_bytes)
1753 start = time.time()
1753 start = time.time()
1754
1754
1755 tr = self.transaction(_('clone'))
1755 tr = self.transaction(_('clone'))
1756 try:
1756 try:
1757 for i in xrange(total_files):
1757 for i in xrange(total_files):
1758 # XXX doesn't support '\n' or '\r' in filenames
1758 # XXX doesn't support '\n' or '\r' in filenames
1759 l = fp.readline()
1759 l = fp.readline()
1760 try:
1760 try:
1761 name, size = l.split('\0', 1)
1761 name, size = l.split('\0', 1)
1762 size = int(size)
1762 size = int(size)
1763 except (ValueError, TypeError):
1763 except (ValueError, TypeError):
1764 raise error.ResponseError(
1764 raise error.ResponseError(
1765 _('unexpected response from remote server:'), l)
1765 _('unexpected response from remote server:'), l)
1766 if self.ui.debugflag:
1766 if self.ui.debugflag:
1767 self.ui.debug('adding %s (%s)\n' %
1767 self.ui.debug('adding %s (%s)\n' %
1768 (name, util.bytecount(size)))
1768 (name, util.bytecount(size)))
1769 # for backwards compat, name was partially encoded
1769 # for backwards compat, name was partially encoded
1770 ofp = self.svfs(store.decodedir(name), 'w')
1770 ofp = self.svfs(store.decodedir(name), 'w')
1771 for chunk in util.filechunkiter(fp, limit=size):
1771 for chunk in util.filechunkiter(fp, limit=size):
1772 handled_bytes += len(chunk)
1772 handled_bytes += len(chunk)
1773 self.ui.progress(_('clone'), handled_bytes,
1773 self.ui.progress(_('clone'), handled_bytes,
1774 total=total_bytes)
1774 total=total_bytes)
1775 ofp.write(chunk)
1775 ofp.write(chunk)
1776 ofp.close()
1776 ofp.close()
1777 tr.close()
1777 tr.close()
1778 finally:
1778 finally:
1779 tr.release()
1779 tr.release()
1780
1780
1781 # Writing straight to files circumvented the in-memory caches
1781 # Writing straight to files circumvented the in-memory caches
1782 self.invalidate()
1782 self.invalidate()
1783
1783
1784 elapsed = time.time() - start
1784 elapsed = time.time() - start
1785 if elapsed <= 0:
1785 if elapsed <= 0:
1786 elapsed = 0.001
1786 elapsed = 0.001
1787 self.ui.progress(_('clone'), None)
1787 self.ui.progress(_('clone'), None)
1788 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1788 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1789 (util.bytecount(total_bytes), elapsed,
1789 (util.bytecount(total_bytes), elapsed,
1790 util.bytecount(total_bytes / elapsed)))
1790 util.bytecount(total_bytes / elapsed)))
1791
1791
1792 # new requirements = old non-format requirements +
1792 # new requirements = old non-format requirements +
1793 # new format-related
1793 # new format-related
1794 # requirements from the streamed-in repository
1794 # requirements from the streamed-in repository
1795 requirements.update(set(self.requirements) - self.supportedformats)
1795 requirements.update(set(self.requirements) - self.supportedformats)
1796 self._applyrequirements(requirements)
1796 self._applyrequirements(requirements)
1797 self._writerequirements()
1797 self._writerequirements()
1798
1798
1799 if rbranchmap:
1799 if rbranchmap:
1800 rbheads = []
1800 rbheads = []
1801 closed = []
1801 closed = []
1802 for bheads in rbranchmap.itervalues():
1802 for bheads in rbranchmap.itervalues():
1803 rbheads.extend(bheads)
1803 rbheads.extend(bheads)
1804 for h in bheads:
1804 for h in bheads:
1805 r = self.changelog.rev(h)
1805 r = self.changelog.rev(h)
1806 b, c = self.changelog.branchinfo(r)
1806 b, c = self.changelog.branchinfo(r)
1807 if c:
1807 if c:
1808 closed.append(h)
1808 closed.append(h)
1809
1809
1810 if rbheads:
1810 if rbheads:
1811 rtiprev = max((int(self.changelog.rev(node))
1811 rtiprev = max((int(self.changelog.rev(node))
1812 for node in rbheads))
1812 for node in rbheads))
1813 cache = branchmap.branchcache(rbranchmap,
1813 cache = branchmap.branchcache(rbranchmap,
1814 self[rtiprev].node(),
1814 self[rtiprev].node(),
1815 rtiprev,
1815 rtiprev,
1816 closednodes=closed)
1816 closednodes=closed)
1817 # Try to stick it as low as possible
1817 # Try to stick it as low as possible
1818 # filters above 'served' are unlikely to be fetched from a clone
1818 # filters above 'served' are unlikely to be fetched from a clone
1819 for candidate in ('base', 'immutable', 'served'):
1819 for candidate in ('base', 'immutable', 'served'):
1820 rview = self.filtered(candidate)
1820 rview = self.filtered(candidate)
1821 if cache.validfor(rview):
1821 if cache.validfor(rview):
1822 self._branchcaches[candidate] = cache
1822 self._branchcaches[candidate] = cache
1823 cache.write(rview)
1823 cache.write(rview)
1824 break
1824 break
1825 self.invalidate()
1825 self.invalidate()
1826 return len(self.heads()) + 1
1826 return len(self.heads()) + 1
1827 finally:
1827 finally:
1828 lock.release()
1828 lock.release()
1829
1829
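For reference, the payload parsed by stream_in() above has this shape on
the wire (file names and sizes are illustrative):

    0\n                      <- response code: 0 = OK
    2 1250\n                 <- total files, total bytes
    data/foo.i\0700\n        <- file name, NUL, size; then 700 raw bytes
    data/bar.i\0550\n        <- file name, NUL, size; then 550 raw bytes

Response code 1 means the server forbids uncompressed cloning and 2 that
locking the remote repository failed, matching the error branches above.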
1830 def clone(self, remote, heads=[], stream=None):
1830 def clone(self, remote, heads=[], stream=None):
1831 '''clone remote repository.
1831 '''clone remote repository.
1832
1832
1833 keyword arguments:
1833 keyword arguments:
1834 heads: list of revs to clone (forces use of pull)
1834 heads: list of revs to clone (forces use of pull)
1835 stream: use streaming clone if possible'''
1835 stream: use streaming clone if possible'''
1836
1836
1837 # now, all clients that can request uncompressed clones can
1837 # now, all clients that can request uncompressed clones can
1838 # read repo formats supported by all servers that can serve
1838 # read repo formats supported by all servers that can serve
1839 # them.
1839 # them.
1840
1840
1841 # if revlog format changes, client will have to check version
1841 # if revlog format changes, client will have to check version
1842 # and format flags on "stream" capability, and use
1842 # and format flags on "stream" capability, and use
1843 # uncompressed only if compatible.
1843 # uncompressed only if compatible.
1844
1844
1845 if stream is None:
1845 if stream is None:
1846 # if the server explicitly prefers to stream (for fast LANs)
1846 # if the server explicitly prefers to stream (for fast LANs)
1847 stream = remote.capable('stream-preferred')
1847 stream = remote.capable('stream-preferred')
1848
1848
1849 if stream and not heads:
1849 if stream and not heads:
1850 # 'stream' means remote revlog format is revlogv1 only
1850 # 'stream' means remote revlog format is revlogv1 only
1851 if remote.capable('stream'):
1851 if remote.capable('stream'):
1852 self.stream_in(remote, set(('revlogv1',)))
1852 self.stream_in(remote, set(('revlogv1',)))
1853 else:
1853 else:
1854 # otherwise, 'streamreqs' contains the remote revlog format
1854 # otherwise, 'streamreqs' contains the remote revlog format
1855 streamreqs = remote.capable('streamreqs')
1855 streamreqs = remote.capable('streamreqs')
1856 if streamreqs:
1856 if streamreqs:
1857 streamreqs = set(streamreqs.split(','))
1857 streamreqs = set(streamreqs.split(','))
1858 # if we support it, stream in and adjust our requirements
1858 # if we support it, stream in and adjust our requirements
1859 if not streamreqs - self.supportedformats:
1859 if not streamreqs - self.supportedformats:
1860 self.stream_in(remote, streamreqs)
1860 self.stream_in(remote, streamreqs)
1861
1861
1862 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1862 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1863 try:
1863 try:
1864 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1864 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1865 ret = exchange.pull(self, remote, heads).cgresult
1865 ret = exchange.pull(self, remote, heads).cgresult
1866 finally:
1866 finally:
1867 self.ui.restoreconfig(quiet)
1867 self.ui.restoreconfig(quiet)
1868 return ret
1868 return ret
1869
1869
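The negotiation in clone() boils down to a small decision procedure: stream
when the server prefers it (or the caller forces it), use the bare 'stream'
capability for revlogv1-only servers, and otherwise accept 'streamreqs'
only when every advertised requirement is supported locally. A standalone
sketch of just that decision (the helper name is illustrative; capable()
mimics the peer API used above):

    def streamclonereqs(capable, supportedformats, heads, stream=None):
        if stream is None:
            stream = bool(capable('stream-preferred'))
        if not stream or heads:
            return None                    # fall back to a normal pull
        if capable('stream'):
            return set(['revlogv1'])
        streamreqs = capable('streamreqs')
        if streamreqs:
            reqs = set(streamreqs.split(','))
            if not reqs - supportedformats:
                return reqs
        return None

    assert streamclonereqs(lambda c: c == 'stream', set(['revlogv1']),
                           heads=[], stream=True) == set(['revlogv1'])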
1870 def pushkey(self, namespace, key, old, new):
1870 def pushkey(self, namespace, key, old, new):
1871 try:
1871 try:
1872 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1872 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1873 old=old, new=new)
1873 old=old, new=new)
1874 except error.HookAbort, exc:
1874 except error.HookAbort, exc:
1875 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1875 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1876 if exc.hint:
1876 if exc.hint:
1877 self.ui.write_err(_("(%s)\n") % exc.hint)
1877 self.ui.write_err(_("(%s)\n") % exc.hint)
1878 return False
1878 return False
1879 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1879 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1880 ret = pushkey.push(self, namespace, key, old, new)
1880 ret = pushkey.push(self, namespace, key, old, new)
1881 def runhook():
1881 def runhook():
1882 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1882 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1883 ret=ret)
1883 ret=ret)
1884 self._afterlock(runhook)
1884 self._afterlock(runhook)
1885 return ret
1885 return ret
1886
1886
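Because the prepushkey hook is fired with throw=True, a hook failure aborts
the pushkey write (bookmarks, phases, ...) before it happens. An
illustrative in-process hook (module name and message invented):

    # in some myhooks.py, enabled with:
    #   [hooks]
    #   prepushkey.veto = python:myhooks.vetobookmarks
    def vetobookmarks(ui, repo, namespace=None, **kwargs):
        if namespace == 'bookmarks':
            ui.warn('bookmark pushes are disabled here\n')
            return True        # a true return fails a pre- hook
        return False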
1887 def listkeys(self, namespace):
1887 def listkeys(self, namespace):
1888 self.hook('prelistkeys', throw=True, namespace=namespace)
1888 self.hook('prelistkeys', throw=True, namespace=namespace)
1889 self.ui.debug('listing keys for "%s"\n' % namespace)
1889 self.ui.debug('listing keys for "%s"\n' % namespace)
1890 values = pushkey.list(self, namespace)
1890 values = pushkey.list(self, namespace)
1891 self.hook('listkeys', namespace=namespace, values=values)
1891 self.hook('listkeys', namespace=namespace, values=values)
1892 return values
1892 return values
1893
1893
1894 def debugwireargs(self, one, two, three=None, four=None, five=None):
1894 def debugwireargs(self, one, two, three=None, four=None, five=None):
1895 '''used to test argument passing over the wire'''
1895 '''used to test argument passing over the wire'''
1896 return "%s %s %s %s %s" % (one, two, three, four, five)
1896 return "%s %s %s %s %s" % (one, two, three, four, five)
1897
1897
1898 def savecommitmessage(self, text):
1898 def savecommitmessage(self, text):
1899 fp = self.vfs('last-message.txt', 'wb')
1899 fp = self.vfs('last-message.txt', 'wb')
1900 try:
1900 try:
1901 fp.write(text)
1901 fp.write(text)
1902 finally:
1902 finally:
1903 fp.close()
1903 fp.close()
1904 return self.pathto(fp.name[len(self.root) + 1:])
1904 return self.pathto(fp.name[len(self.root) + 1:])
1905
1905
1906 # used to avoid circular references so destructors work
1906 # used to avoid circular references so destructors work
1907 def aftertrans(files):
1907 def aftertrans(files):
1908 renamefiles = [tuple(t) for t in files]
1908 renamefiles = [tuple(t) for t in files]
1909 def a():
1909 def a():
1910 for vfs, src, dest in renamefiles:
1910 for vfs, src, dest in renamefiles:
1911 try:
1911 try:
1912 vfs.rename(src, dest)
1912 vfs.rename(src, dest)
1913 except OSError: # journal file does not yet exist
1913 except OSError: # journal file does not yet exist
1914 pass
1914 pass
1915 return a
1915 return a
1916
1916
1917 def undoname(fn):
1917 def undoname(fn):
1918 base, name = os.path.split(fn)
1918 base, name = os.path.split(fn)
1919 assert name.startswith('journal')
1919 assert name.startswith('journal')
1920 return os.path.join(base, name.replace('journal', 'undo', 1))
1920 return os.path.join(base, name.replace('journal', 'undo', 1))
1921
1921
1922 def instance(ui, path, create):
1922 def instance(ui, path, create):
1923 return localrepository(ui, util.urllocalpath(path), create)
1923 return localrepository(ui, util.urllocalpath(path), create)
1924
1924
1925 def islocal(path):
1925 def islocal(path):
1926 return True
1926 return True
@@ -1,1157 +1,1157
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat
13 import os, errno, re, glob, tempfile, shutil, stat
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
23 class status(tuple):
23 class status(tuple):
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 and 'ignored' properties are only relevant to the working copy.
25 and 'ignored' properties are only relevant to the working copy.
26 '''
26 '''
27
27
28 __slots__ = ()
28 __slots__ = ()
29
29
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 clean):
31 clean):
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 ignored, clean))
33 ignored, clean))
34
34
35 @property
35 @property
36 def modified(self):
36 def modified(self):
37 '''files that have been modified'''
37 '''files that have been modified'''
38 return self[0]
38 return self[0]
39
39
40 @property
40 @property
41 def added(self):
41 def added(self):
42 '''files that have been added'''
42 '''files that have been added'''
43 return self[1]
43 return self[1]
44
44
45 @property
45 @property
46 def removed(self):
46 def removed(self):
47 '''files that have been removed'''
47 '''files that have been removed'''
48 return self[2]
48 return self[2]
49
49
50 @property
50 @property
51 def deleted(self):
51 def deleted(self):
52 '''files that are in the dirstate, but have been deleted from the
52 '''files that are in the dirstate, but have been deleted from the
53 working copy (aka "missing")
53 working copy (aka "missing")
54 '''
54 '''
55 return self[3]
55 return self[3]
56
56
57 @property
57 @property
58 def unknown(self):
58 def unknown(self):
59 '''files not in the dirstate that are not ignored'''
59 '''files not in the dirstate that are not ignored'''
60 return self[4]
60 return self[4]
61
61
62 @property
62 @property
63 def ignored(self):
63 def ignored(self):
64 '''files not in the dirstate that are ignored (by _dirignore())'''
64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 return self[5]
65 return self[5]
66
66
67 @property
67 @property
68 def clean(self):
68 def clean(self):
69 '''files that have not been modified'''
69 '''files that have not been modified'''
70 return self[6]
70 return self[6]
71
71
72 def __repr__(self, *args, **kwargs):
72 def __repr__(self, *args, **kwargs):
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 'unknown=%r, ignored=%r, clean=%r>') % self)
74 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
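The status class above stays a plain 7-tuple for existing positional
callers while adding named access; a quick illustrative use (file names
invented):

    from mercurial import scmutil

    st = scmutil.status(['a.txt'], [], [], [], ['junk'], [], ['b.txt'])
    assert st.modified == st[0] == ['a.txt']   # named and positional agree
    assert st.unknown == ['junk']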
76 def itersubrepos(ctx1, ctx2):
76 def itersubrepos(ctx1, ctx2):
77 """find subrepos in ctx1 or ctx2"""
77 """find subrepos in ctx1 or ctx2"""
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 # has been modified (in ctx2) but not yet committed (in ctx1).
80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 for subpath, ctx in sorted(subpaths.iteritems()):
83 for subpath, ctx in sorted(subpaths.iteritems()):
84 yield subpath, ctx.sub(subpath)
84 yield subpath, ctx.sub(subpath)
85
85
86 def nochangesfound(ui, repo, excluded=None):
86 def nochangesfound(ui, repo, excluded=None):
87 '''Report no changes for push/pull, excluded is None or a list of
87 '''Report no changes for push/pull, excluded is None or a list of
88 nodes excluded from the push/pull.
88 nodes excluded from the push/pull.
89 '''
89 '''
90 secretlist = []
90 secretlist = []
91 if excluded:
91 if excluded:
92 for n in excluded:
92 for n in excluded:
93 if n not in repo:
93 if n not in repo:
94 # discovery should not have included the filtered revision,
94 # discovery should not have included the filtered revision,
95 # we have to explicitly exclude it until discovery is cleanup.
95 # we have to explicitly exclude it until discovery is cleaned up.
95 # we have to explicitly exclude it until discovery is cleaned up.
96 continue
97 ctx = repo[n]
97 ctx = repo[n]
98 if ctx.phase() >= phases.secret and not ctx.extinct():
98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 secretlist.append(n)
99 secretlist.append(n)
100
100
101 if secretlist:
101 if secretlist:
102 ui.status(_("no changes found (ignored %d secret changesets)\n")
102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 % len(secretlist))
103 % len(secretlist))
104 else:
104 else:
105 ui.status(_("no changes found\n"))
105 ui.status(_("no changes found\n"))
106
106
107 def checknewlabel(repo, lbl, kind):
107 def checknewlabel(repo, lbl, kind):
108 # Do not use the "kind" parameter in ui output.
108 # Do not use the "kind" parameter in ui output.
109 # It makes strings difficult to translate.
109 # It makes strings difficult to translate.
110 if lbl in ['tip', '.', 'null']:
110 if lbl in ['tip', '.', 'null']:
111 raise util.Abort(_("the name '%s' is reserved") % lbl)
111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 for c in (':', '\0', '\n', '\r'):
112 for c in (':', '\0', '\n', '\r'):
113 if c in lbl:
113 if c in lbl:
114 raise util.Abort(_("%r cannot be used in a name") % c)
114 raise util.Abort(_("%r cannot be used in a name") % c)
115 try:
115 try:
116 int(lbl)
116 int(lbl)
117 raise util.Abort(_("cannot use an integer as a name"))
117 raise util.Abort(_("cannot use an integer as a name"))
118 except ValueError:
118 except ValueError:
119 pass
119 pass
120
120
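checknewlabel() rejects reserved names, metacharacters, and bare integers
in one pass; the repo argument is unused by the checks shown above, so None
suffices in this illustrative sketch:

    from mercurial import scmutil, util

    scmutil.checknewlabel(None, 'feature-x', 'bookmark')     # accepted
    for bad in ('tip', '12', 'a:b'):
        try:
            scmutil.checknewlabel(None, bad, 'bookmark')
        except util.Abort:
            pass        # reserved name, integer, and ':' all abort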
121 def checkfilename(f):
121 def checkfilename(f):
122 '''Check that the filename f is an acceptable filename for a tracked file'''
122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 if '\r' in f or '\n' in f:
123 if '\r' in f or '\n' in f:
124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125
125
126 def checkportable(ui, f):
126 def checkportable(ui, f):
127 '''Check if filename f is portable and warn or abort depending on config'''
127 '''Check if filename f is portable and warn or abort depending on config'''
128 checkfilename(f)
128 checkfilename(f)
129 abort, warn = checkportabilityalert(ui)
129 abort, warn = checkportabilityalert(ui)
130 if abort or warn:
130 if abort or warn:
131 msg = util.checkwinfilename(f)
131 msg = util.checkwinfilename(f)
132 if msg:
132 if msg:
133 msg = "%s: %r" % (msg, f)
133 msg = "%s: %r" % (msg, f)
134 if abort:
134 if abort:
135 raise util.Abort(msg)
135 raise util.Abort(msg)
136 ui.warn(_("warning: %s\n") % msg)
136 ui.warn(_("warning: %s\n") % msg)
137
137
138 def checkportabilityalert(ui):
138 def checkportabilityalert(ui):
139 '''check if the user's config requests nothing, a warning, or abort for
139 '''check if the user's config requests nothing, a warning, or abort for
140 non-portable filenames'''
140 non-portable filenames'''
141 val = ui.config('ui', 'portablefilenames', 'warn')
141 val = ui.config('ui', 'portablefilenames', 'warn')
142 lval = val.lower()
142 lval = val.lower()
143 bval = util.parsebool(val)
143 bval = util.parsebool(val)
144 abort = os.name == 'nt' or lval == 'abort'
144 abort = os.name == 'nt' or lval == 'abort'
145 warn = bval or lval == 'warn'
145 warn = bval or lval == 'warn'
146 if bval is None and not (warn or abort or lval == 'ignore'):
146 if bval is None and not (warn or abort or lval == 'ignore'):
147 raise error.ConfigError(
147 raise error.ConfigError(
148 _("ui.portablefilenames value is invalid ('%s')") % val)
148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 return abort, warn
149 return abort, warn
150
150
151 class casecollisionauditor(object):
151 class casecollisionauditor(object):
152 def __init__(self, ui, abort, dirstate):
152 def __init__(self, ui, abort, dirstate):
153 self._ui = ui
153 self._ui = ui
154 self._abort = abort
154 self._abort = abort
155 allfiles = '\0'.join(dirstate._map)
155 allfiles = '\0'.join(dirstate._map)
156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 self._dirstate = dirstate
157 self._dirstate = dirstate
158 # The purpose of _newfiles is so that we don't complain about
158 # The purpose of _newfiles is so that we don't complain about
159 # case collisions if someone were to call this object with the
159 # case collisions if someone were to call this object with the
160 # same filename twice.
160 # same filename twice.
161 self._newfiles = set()
161 self._newfiles = set()
162
162
163 def __call__(self, f):
163 def __call__(self, f):
164 if f in self._newfiles:
164 if f in self._newfiles:
165 return
165 return
166 fl = encoding.lower(f)
166 fl = encoding.lower(f)
167 if fl in self._loweredfiles and f not in self._dirstate:
167 if fl in self._loweredfiles and f not in self._dirstate:
168 msg = _('possible case-folding collision for %s') % f
168 msg = _('possible case-folding collision for %s') % f
169 if self._abort:
169 if self._abort:
170 raise util.Abort(msg)
170 raise util.Abort(msg)
171 self._ui.warn(_("warning: %s\n") % msg)
171 self._ui.warn(_("warning: %s\n") % msg)
172 self._loweredfiles.add(fl)
172 self._loweredfiles.add(fl)
173 self._newfiles.add(f)
173 self._newfiles.add(f)
174
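casecollisionauditor is meant to be called once per file being added; an
illustrative session (repo assumed open, neither spelling tracked yet,
abort=False so collisions only warn):

    from mercurial import scmutil

    audit = scmutil.casecollisionauditor(repo.ui, False, repo.dirstate)
    audit('README')     # first spelling: remembered
    audit('readme')     # warns: possible case-folding collision
    audit('readme')     # now in _newfiles, so no second warning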
174
175 def develwarn(tui, msg):
175 def develwarn(tui, msg):
176 """issue a developer warning message"""
176 """issue a developer warning message"""
177 if tui.tracebackflag:
177 if tui.tracebackflag:
178 util.debugstacktrace(msg, 2)
178 util.debugstacktrace(msg, 2)
179 else:
179 else:
180 tui.write_err(msg)
180 tui.write_err(msg + '\n')
181
181
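This hunk is the substance of the changeset: develwarn() now appends the
end-of-line itself, so call sites pass bare messages. An illustrative call
(the message text is invented):

    from mercurial import scmutil

    scmutil.develwarn(repo.ui, 'write with no lock')

With ui.tracebackflag set, the same call prints a stack trace through
util.debugstacktrace() instead of a single line.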
182 def filteredhash(repo, maxrev):
182 def filteredhash(repo, maxrev):
183 """build hash of filtered revisions in the current repoview.
183 """build hash of filtered revisions in the current repoview.
184
184
185 Multiple caches perform up-to-date validation by checking that the
185 Multiple caches perform up-to-date validation by checking that the
186 tiprev and tipnode stored in the cache file match the current repository.
186 tiprev and tipnode stored in the cache file match the current repository.
187 However, this is not sufficient for validating repoviews because the set
187 However, this is not sufficient for validating repoviews because the set
188 of revisions in the view may change without the repository tiprev and
188 of revisions in the view may change without the repository tiprev and
189 tipnode changing.
189 tipnode changing.
190
190
191 This function hashes all the revs filtered from the view and returns
191 This function hashes all the revs filtered from the view and returns
192 that SHA-1 digest.
192 that SHA-1 digest.
193 """
193 """
194 cl = repo.changelog
194 cl = repo.changelog
195 if not cl.filteredrevs:
195 if not cl.filteredrevs:
196 return None
196 return None
197 key = None
197 key = None
198 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
198 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
199 if revs:
199 if revs:
200 s = util.sha1()
200 s = util.sha1()
201 for rev in revs:
201 for rev in revs:
202 s.update('%s;' % rev)
202 s.update('%s;' % rev)
203 key = s.digest()
203 key = s.digest()
204 return key
204 return key
205
205
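The key above is plain SHA-1 over the decimal revision numbers, each
terminated by ';'. An equivalent standalone computation, assuming the view
hides revisions 2 and 5:

    import hashlib

    revs = sorted([2, 5])              # filtered revs <= maxrev
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)          # same '%s;' framing as above
    key = s.digest()                   # 20-byte binary digest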
206 class abstractvfs(object):
206 class abstractvfs(object):
207 """Abstract base class; cannot be instantiated"""
207 """Abstract base class; cannot be instantiated"""
208
208
209 def __init__(self, *args, **kwargs):
209 def __init__(self, *args, **kwargs):
210 '''Prevent instantiation; don't call this from subclasses.'''
210 '''Prevent instantiation; don't call this from subclasses.'''
211 raise NotImplementedError('attempted instantiating ' + str(type(self)))
211 raise NotImplementedError('attempted instantiating ' + str(type(self)))
212
212
213 def tryread(self, path):
213 def tryread(self, path):
214 '''gracefully return an empty string for missing files'''
214 '''gracefully return an empty string for missing files'''
215 try:
215 try:
216 return self.read(path)
216 return self.read(path)
217 except IOError, inst:
217 except IOError, inst:
218 if inst.errno != errno.ENOENT:
218 if inst.errno != errno.ENOENT:
219 raise
219 raise
220 return ""
220 return ""
221
221
222 def tryreadlines(self, path, mode='rb'):
222 def tryreadlines(self, path, mode='rb'):
223 '''gracefully return an empty array for missing files'''
223 '''gracefully return an empty array for missing files'''
224 try:
224 try:
225 return self.readlines(path, mode=mode)
225 return self.readlines(path, mode=mode)
226 except IOError, inst:
226 except IOError, inst:
227 if inst.errno != errno.ENOENT:
227 if inst.errno != errno.ENOENT:
228 raise
228 raise
229 return []
229 return []
230
230
231 def open(self, path, mode="r", text=False, atomictemp=False,
231 def open(self, path, mode="r", text=False, atomictemp=False,
232 notindexed=False):
232 notindexed=False):
233 '''Open ``path`` file, which is relative to vfs root.
233 '''Open ``path`` file, which is relative to vfs root.
234
234
235 Newly created directories are marked as "not to be indexed by
235 Newly created directories are marked as "not to be indexed by
236 the content indexing service", if ``notindexed`` is specified
236 the content indexing service", if ``notindexed`` is specified
237 for "write" mode access.
237 for "write" mode access.
238 '''
238 '''
239 self.open = self.__call__
239 self.open = self.__call__
240 return self.__call__(path, mode, text, atomictemp, notindexed)
240 return self.__call__(path, mode, text, atomictemp, notindexed)
241
241
242 def read(self, path):
242 def read(self, path):
243 fp = self(path, 'rb')
243 fp = self(path, 'rb')
244 try:
244 try:
245 return fp.read()
245 return fp.read()
246 finally:
246 finally:
247 fp.close()
247 fp.close()
248
248
249 def readlines(self, path, mode='rb'):
249 def readlines(self, path, mode='rb'):
250 fp = self(path, mode=mode)
250 fp = self(path, mode=mode)
251 try:
251 try:
252 return fp.readlines()
252 return fp.readlines()
253 finally:
253 finally:
254 fp.close()
254 fp.close()
255
255
256 def write(self, path, data):
256 def write(self, path, data):
257 fp = self(path, 'wb')
257 fp = self(path, 'wb')
258 try:
258 try:
259 return fp.write(data)
259 return fp.write(data)
260 finally:
260 finally:
261 fp.close()
261 fp.close()
262
262
263 def writelines(self, path, data, mode='wb', notindexed=False):
263 def writelines(self, path, data, mode='wb', notindexed=False):
264 fp = self(path, mode=mode, notindexed=notindexed)
264 fp = self(path, mode=mode, notindexed=notindexed)
265 try:
265 try:
266 return fp.writelines(data)
266 return fp.writelines(data)
267 finally:
267 finally:
268 fp.close()
268 fp.close()
269
269
270 def append(self, path, data):
270 def append(self, path, data):
271 fp = self(path, 'ab')
271 fp = self(path, 'ab')
272 try:
272 try:
273 return fp.write(data)
273 return fp.write(data)
274 finally:
274 finally:
275 fp.close()
275 fp.close()
276
276
277 def chmod(self, path, mode):
277 def chmod(self, path, mode):
278 return os.chmod(self.join(path), mode)
278 return os.chmod(self.join(path), mode)
279
279
280 def exists(self, path=None):
280 def exists(self, path=None):
281 return os.path.exists(self.join(path))
281 return os.path.exists(self.join(path))
282
282
283 def fstat(self, fp):
283 def fstat(self, fp):
284 return util.fstat(fp)
284 return util.fstat(fp)
285
285
286 def isdir(self, path=None):
286 def isdir(self, path=None):
287 return os.path.isdir(self.join(path))
287 return os.path.isdir(self.join(path))
288
288
289 def isfile(self, path=None):
289 def isfile(self, path=None):
290 return os.path.isfile(self.join(path))
290 return os.path.isfile(self.join(path))
291
291
292 def islink(self, path=None):
292 def islink(self, path=None):
293 return os.path.islink(self.join(path))
293 return os.path.islink(self.join(path))
294
294
295 def reljoin(self, *paths):
295 def reljoin(self, *paths):
296 """join various elements of a path together (as os.path.join would do)
296 """join various elements of a path together (as os.path.join would do)
297
297
298 The vfs base is not injected so that paths stay relative. This exists
298 The vfs base is not injected so that paths stay relative. This exists
299 to allow handling of strange encoding if needed."""
299 to allow handling of strange encoding if needed."""
300 return os.path.join(*paths)
300 return os.path.join(*paths)
301
301
302 def split(self, path):
302 def split(self, path):
303 """split top-most element of a path (as os.path.split would do)
303 """split top-most element of a path (as os.path.split would do)
304
304
305 This exists to allow handling of strange encoding if needed."""
305 This exists to allow handling of strange encoding if needed."""
306 return os.path.split(path)
306 return os.path.split(path)
307
307
308 def lexists(self, path=None):
308 def lexists(self, path=None):
309 return os.path.lexists(self.join(path))
309 return os.path.lexists(self.join(path))
310
310
311 def lstat(self, path=None):
311 def lstat(self, path=None):
312 return os.lstat(self.join(path))
312 return os.lstat(self.join(path))
313
313
314 def listdir(self, path=None):
314 def listdir(self, path=None):
315 return os.listdir(self.join(path))
315 return os.listdir(self.join(path))
316
316
317 def makedir(self, path=None, notindexed=True):
317 def makedir(self, path=None, notindexed=True):
318 return util.makedir(self.join(path), notindexed)
318 return util.makedir(self.join(path), notindexed)
319
319
320 def makedirs(self, path=None, mode=None):
320 def makedirs(self, path=None, mode=None):
321 return util.makedirs(self.join(path), mode)
321 return util.makedirs(self.join(path), mode)
322
322
323 def makelock(self, info, path):
323 def makelock(self, info, path):
324 return util.makelock(info, self.join(path))
324 return util.makelock(info, self.join(path))
325
325
326 def mkdir(self, path=None):
326 def mkdir(self, path=None):
327 return os.mkdir(self.join(path))
327 return os.mkdir(self.join(path))
328
328
329 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
329 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
330 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
330 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
331 dir=self.join(dir), text=text)
331 dir=self.join(dir), text=text)
332 dname, fname = util.split(name)
332 dname, fname = util.split(name)
333 if dir:
333 if dir:
334 return fd, os.path.join(dir, fname)
334 return fd, os.path.join(dir, fname)
335 else:
335 else:
336 return fd, fname
336 return fd, fname
337
337
338 def readdir(self, path=None, stat=None, skip=None):
338 def readdir(self, path=None, stat=None, skip=None):
339 return osutil.listdir(self.join(path), stat, skip)
339 return osutil.listdir(self.join(path), stat, skip)
340
340
341 def readlock(self, path):
341 def readlock(self, path):
342 return util.readlock(self.join(path))
342 return util.readlock(self.join(path))
343
343
344 def rename(self, src, dst):
344 def rename(self, src, dst):
345 return util.rename(self.join(src), self.join(dst))
345 return util.rename(self.join(src), self.join(dst))
346
346
347 def readlink(self, path):
347 def readlink(self, path):
348 return os.readlink(self.join(path))
348 return os.readlink(self.join(path))
349
349
350 def removedirs(self, path=None):
350 def removedirs(self, path=None):
351 """Remove a leaf directory and all empty intermediate ones
351 """Remove a leaf directory and all empty intermediate ones
352 """
352 """
353 return util.removedirs(self.join(path))
353 return util.removedirs(self.join(path))
354
354
355 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
355 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
356 """Remove a directory tree recursively
356 """Remove a directory tree recursively
357
357
358 If ``forcibly``, this tries to remove READ-ONLY files, too.
358 If ``forcibly``, this tries to remove READ-ONLY files, too.
359 """
359 """
360 if forcibly:
360 if forcibly:
361 def onerror(function, path, excinfo):
361 def onerror(function, path, excinfo):
362 if function is not os.remove:
362 if function is not os.remove:
363 raise
363 raise
364 # read-only files cannot be unlinked under Windows
364 # read-only files cannot be unlinked under Windows
365 s = os.stat(path)
365 s = os.stat(path)
366 if (s.st_mode & stat.S_IWRITE) != 0:
366 if (s.st_mode & stat.S_IWRITE) != 0:
367 raise
367 raise
368 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
368 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
369 os.remove(path)
369 os.remove(path)
370 else:
370 else:
371 onerror = None
371 onerror = None
372 return shutil.rmtree(self.join(path),
372 return shutil.rmtree(self.join(path),
373 ignore_errors=ignore_errors, onerror=onerror)
373 ignore_errors=ignore_errors, onerror=onerror)
374
374
375 def setflags(self, path, l, x):
375 def setflags(self, path, l, x):
376 return util.setflags(self.join(path), l, x)
376 return util.setflags(self.join(path), l, x)
377
377
378 def stat(self, path=None):
378 def stat(self, path=None):
379 return os.stat(self.join(path))
379 return os.stat(self.join(path))
380
380
381 def unlink(self, path=None):
381 def unlink(self, path=None):
382 return util.unlink(self.join(path))
382 return util.unlink(self.join(path))
383
383
384 def unlinkpath(self, path=None, ignoremissing=False):
384 def unlinkpath(self, path=None, ignoremissing=False):
385 return util.unlinkpath(self.join(path), ignoremissing)
385 return util.unlinkpath(self.join(path), ignoremissing)
386
386
387 def utime(self, path=None, t=None):
387 def utime(self, path=None, t=None):
388 return os.utime(self.join(path), t)
388 return os.utime(self.join(path), t)
389
389
390 def walk(self, path=None, onerror=None):
390 def walk(self, path=None, onerror=None):
391 """Yield (dirpath, dirs, files) tuple for each directories under path
391 """Yield (dirpath, dirs, files) tuple for each directories under path
392
392
393 ``dirpath`` is relative to the root of this vfs. This
393 ``dirpath`` is relative to the root of this vfs. This
394 uses ``os.sep`` as the path separator, even if you specify a
394 uses ``os.sep`` as the path separator, even if you specify a
395 POSIX-style ``path``.
395 POSIX-style ``path``.
396
396
397 "The root of this vfs" is represented as empty ``dirpath``.
397 "The root of this vfs" is represented as empty ``dirpath``.
398 """
398 """
399 root = os.path.normpath(self.join(None))
399 root = os.path.normpath(self.join(None))
400 # when dirpath == root, dirpath[prefixlen:] becomes empty
400 # when dirpath == root, dirpath[prefixlen:] becomes empty
401 # because len(dirpath) < prefixlen.
401 # because len(dirpath) < prefixlen.
402 prefixlen = len(pathutil.normasprefix(root))
402 prefixlen = len(pathutil.normasprefix(root))
403 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
403 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
404 yield (dirpath[prefixlen:], dirs, files)
404 yield (dirpath[prefixlen:], dirs, files)
405
405
406 class vfs(abstractvfs):
406 class vfs(abstractvfs):
407 '''Operate files relative to a base directory
407 '''Operate files relative to a base directory
408
408
409 This class is used to hide the details of COW semantics and
409 This class is used to hide the details of COW semantics and
410 remote file access from higher level code.
410 remote file access from higher level code.
411 '''
411 '''
412 def __init__(self, base, audit=True, expandpath=False, realpath=False):
412 def __init__(self, base, audit=True, expandpath=False, realpath=False):
413 if expandpath:
413 if expandpath:
414 base = util.expandpath(base)
414 base = util.expandpath(base)
415 if realpath:
415 if realpath:
416 base = os.path.realpath(base)
416 base = os.path.realpath(base)
417 self.base = base
417 self.base = base
418 self._setmustaudit(audit)
418 self._setmustaudit(audit)
419 self.createmode = None
419 self.createmode = None
420 self._trustnlink = None
420 self._trustnlink = None
421
421
422 def _getmustaudit(self):
422 def _getmustaudit(self):
423 return self._audit
423 return self._audit
424
424
425 def _setmustaudit(self, onoff):
425 def _setmustaudit(self, onoff):
426 self._audit = onoff
426 self._audit = onoff
427 if onoff:
427 if onoff:
428 self.audit = pathutil.pathauditor(self.base)
428 self.audit = pathutil.pathauditor(self.base)
429 else:
429 else:
430 self.audit = util.always
430 self.audit = util.always
431
431
432 mustaudit = property(_getmustaudit, _setmustaudit)
432 mustaudit = property(_getmustaudit, _setmustaudit)
433
433
434 @util.propertycache
434 @util.propertycache
435 def _cansymlink(self):
435 def _cansymlink(self):
436 return util.checklink(self.base)
436 return util.checklink(self.base)
437
437
438 @util.propertycache
438 @util.propertycache
439 def _chmod(self):
439 def _chmod(self):
440 return util.checkexec(self.base)
440 return util.checkexec(self.base)
441
441
442 def _fixfilemode(self, name):
442 def _fixfilemode(self, name):
443 if self.createmode is None or not self._chmod:
443 if self.createmode is None or not self._chmod:
444 return
444 return
445 os.chmod(name, self.createmode & 0666)
445 os.chmod(name, self.createmode & 0666)
446
446
447 def __call__(self, path, mode="r", text=False, atomictemp=False,
447 def __call__(self, path, mode="r", text=False, atomictemp=False,
448 notindexed=False):
448 notindexed=False):
449 '''Open ``path`` file, which is relative to vfs root.
449 '''Open ``path`` file, which is relative to vfs root.
450
450
451 Newly created directories are marked as "not to be indexed by
451 Newly created directories are marked as "not to be indexed by
452 the content indexing service", if ``notindexed`` is specified
452 the content indexing service", if ``notindexed`` is specified
453 for "write" mode access.
453 for "write" mode access.
454 '''
454 '''
455 if self._audit:
455 if self._audit:
456 r = util.checkosfilename(path)
456 r = util.checkosfilename(path)
457 if r:
457 if r:
458 raise util.Abort("%s: %r" % (r, path))
458 raise util.Abort("%s: %r" % (r, path))
459 self.audit(path)
459 self.audit(path)
460 f = self.join(path)
460 f = self.join(path)
461
461
462 if not text and "b" not in mode:
462 if not text and "b" not in mode:
463 mode += "b" # for that other OS
463 mode += "b" # for that other OS
464
464
465 nlink = -1
465 nlink = -1
466 if mode not in ('r', 'rb'):
466 if mode not in ('r', 'rb'):
467 dirname, basename = util.split(f)
467 dirname, basename = util.split(f)
468 # If basename is empty, then the path is malformed because it points
468 # If basename is empty, then the path is malformed because it points
469 # to a directory. Let the posixfile() call below raise IOError.
469 # to a directory. Let the posixfile() call below raise IOError.
470 if basename:
470 if basename:
471 if atomictemp:
471 if atomictemp:
472 util.ensuredirs(dirname, self.createmode, notindexed)
472 util.ensuredirs(dirname, self.createmode, notindexed)
473 return util.atomictempfile(f, mode, self.createmode)
473 return util.atomictempfile(f, mode, self.createmode)
474 try:
474 try:
475 if 'w' in mode:
475 if 'w' in mode:
476 util.unlink(f)
476 util.unlink(f)
477 nlink = 0
477 nlink = 0
478 else:
478 else:
479 # nlinks() may behave differently for files on Windows
479 # nlinks() may behave differently for files on Windows
480 # shares if the file is open.
480 # shares if the file is open.
481 fd = util.posixfile(f)
481 fd = util.posixfile(f)
482 nlink = util.nlinks(f)
482 nlink = util.nlinks(f)
483 if nlink < 1:
483 if nlink < 1:
484 nlink = 2 # force mktempcopy (issue1922)
484 nlink = 2 # force mktempcopy (issue1922)
485 fd.close()
485 fd.close()
486 except (OSError, IOError), e:
486 except (OSError, IOError), e:
487 if e.errno != errno.ENOENT:
487 if e.errno != errno.ENOENT:
488 raise
488 raise
489 nlink = 0
489 nlink = 0
490 util.ensuredirs(dirname, self.createmode, notindexed)
490 util.ensuredirs(dirname, self.createmode, notindexed)
491 if nlink > 0:
491 if nlink > 0:
492 if self._trustnlink is None:
492 if self._trustnlink is None:
493 self._trustnlink = nlink > 1 or util.checknlink(f)
493 self._trustnlink = nlink > 1 or util.checknlink(f)
494 if nlink > 1 or not self._trustnlink:
494 if nlink > 1 or not self._trustnlink:
495 util.rename(util.mktempcopy(f), f)
495 util.rename(util.mktempcopy(f), f)
496 fp = util.posixfile(f, mode)
496 fp = util.posixfile(f, mode)
497 if nlink == 0:
497 if nlink == 0:
498 self._fixfilemode(f)
498 self._fixfilemode(f)
499 return fp
499 return fp
500
500
501 def symlink(self, src, dst):
501 def symlink(self, src, dst):
502 self.audit(dst)
502 self.audit(dst)
503 linkname = self.join(dst)
503 linkname = self.join(dst)
504 try:
504 try:
505 os.unlink(linkname)
505 os.unlink(linkname)
506 except OSError:
506 except OSError:
507 pass
507 pass
508
508
509 util.ensuredirs(os.path.dirname(linkname), self.createmode)
509 util.ensuredirs(os.path.dirname(linkname), self.createmode)
510
510
511 if self._cansymlink:
511 if self._cansymlink:
512 try:
512 try:
513 os.symlink(src, linkname)
513 os.symlink(src, linkname)
514 except OSError, err:
514 except OSError, err:
515 raise OSError(err.errno, _('could not symlink to %r: %s') %
515 raise OSError(err.errno, _('could not symlink to %r: %s') %
516 (src, err.strerror), linkname)
516 (src, err.strerror), linkname)
517 else:
517 else:
518 self.write(dst, src)
518 self.write(dst, src)
519
519
520 def join(self, path, *insidef):
520 def join(self, path, *insidef):
521 if path:
521 if path:
522 return os.path.join(self.base, path, *insidef)
522 return os.path.join(self.base, path, *insidef)
523 else:
523 else:
524 return self.base
524 return self.base
525
525
526 opener = vfs
526 opener = vfs
527
527
528 class auditvfs(object):
528 class auditvfs(object):
529 def __init__(self, vfs):
529 def __init__(self, vfs):
530 self.vfs = vfs
530 self.vfs = vfs
531
531
532 def _getmustaudit(self):
532 def _getmustaudit(self):
533 return self.vfs.mustaudit
533 return self.vfs.mustaudit
534
534
535 def _setmustaudit(self, onoff):
535 def _setmustaudit(self, onoff):
536 self.vfs.mustaudit = onoff
536 self.vfs.mustaudit = onoff
537
537
538 mustaudit = property(_getmustaudit, _setmustaudit)
538 mustaudit = property(_getmustaudit, _setmustaudit)
539
539
540 class filtervfs(abstractvfs, auditvfs):
540 class filtervfs(abstractvfs, auditvfs):
541 '''Wrapper vfs for filtering filenames with a function.'''
541 '''Wrapper vfs for filtering filenames with a function.'''
542
542
543 def __init__(self, vfs, filter):
543 def __init__(self, vfs, filter):
544 auditvfs.__init__(self, vfs)
544 auditvfs.__init__(self, vfs)
545 self._filter = filter
545 self._filter = filter
546
546
547 def __call__(self, path, *args, **kwargs):
547 def __call__(self, path, *args, **kwargs):
548 return self.vfs(self._filter(path), *args, **kwargs)
548 return self.vfs(self._filter(path), *args, **kwargs)
549
549
550 def join(self, path, *insidef):
550 def join(self, path, *insidef):
551 if path:
551 if path:
552 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
552 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
553 else:
553 else:
554 return self.vfs.join(path)
554 return self.vfs.join(path)
555
555
556 filteropener = filtervfs
556 filteropener = filtervfs
557
557
558 class readonlyvfs(abstractvfs, auditvfs):
558 class readonlyvfs(abstractvfs, auditvfs):
559 '''Wrapper vfs preventing any writing.'''
559 '''Wrapper vfs preventing any writing.'''
560
560
561 def __init__(self, vfs):
561 def __init__(self, vfs):
562 auditvfs.__init__(self, vfs)
562 auditvfs.__init__(self, vfs)
563
563
564 def __call__(self, path, mode='r', *args, **kw):
564 def __call__(self, path, mode='r', *args, **kw):
565 if mode not in ('r', 'rb'):
565 if mode not in ('r', 'rb'):
566 raise util.Abort('this vfs is read only')
566 raise util.Abort('this vfs is read only')
567 return self.vfs(path, mode, *args, **kw)
567 return self.vfs(path, mode, *args, **kw)
568
568
569
569
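readonlyvfs refuses every mode other than 'r'/'rb', which makes it a cheap
guard when handing storage to code that must not write; an illustrative use
(the path is invented):

    from mercurial import scmutil, util

    ro = scmutil.readonlyvfs(scmutil.vfs('/srv/hg/repo/.hg'))
    data = ro('requires', 'rb').read()     # reads pass through
    try:
        ro('requires', 'wb')               # any write mode aborts
    except util.Abort:
        pass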
570 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
570 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
571 '''yield every hg repository under path, always recursively.
571 '''yield every hg repository under path, always recursively.
572 The recurse flag only controls recursion into repo working dirs'''
572 The recurse flag only controls recursion into repo working dirs'''
573 def errhandler(err):
573 def errhandler(err):
574 if err.filename == path:
574 if err.filename == path:
575 raise err
575 raise err
576 samestat = getattr(os.path, 'samestat', None)
576 samestat = getattr(os.path, 'samestat', None)
577 if followsym and samestat is not None:
577 if followsym and samestat is not None:
578 def adddir(dirlst, dirname):
578 def adddir(dirlst, dirname):
579 match = False
579 match = False
580 dirstat = os.stat(dirname)
580 dirstat = os.stat(dirname)
581 for lstdirstat in dirlst:
581 for lstdirstat in dirlst:
582 if samestat(dirstat, lstdirstat):
582 if samestat(dirstat, lstdirstat):
583 match = True
583 match = True
584 break
584 break
585 if not match:
585 if not match:
586 dirlst.append(dirstat)
586 dirlst.append(dirstat)
587 return not match
587 return not match
588 else:
588 else:
589 followsym = False
589 followsym = False
590
590
591 if (seen_dirs is None) and followsym:
591 if (seen_dirs is None) and followsym:
592 seen_dirs = []
592 seen_dirs = []
593 adddir(seen_dirs, path)
593 adddir(seen_dirs, path)
594 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
594 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
595 dirs.sort()
595 dirs.sort()
596 if '.hg' in dirs:
596 if '.hg' in dirs:
597 yield root # found a repository
597 yield root # found a repository
598 qroot = os.path.join(root, '.hg', 'patches')
598 qroot = os.path.join(root, '.hg', 'patches')
599 if os.path.isdir(os.path.join(qroot, '.hg')):
599 if os.path.isdir(os.path.join(qroot, '.hg')):
600 yield qroot # we have a patch queue repo here
600 yield qroot # we have a patch queue repo here
601 if recurse:
601 if recurse:
602 # avoid recursing inside the .hg directory
602 # avoid recursing inside the .hg directory
603 dirs.remove('.hg')
603 dirs.remove('.hg')
604 else:
604 else:
605 dirs[:] = [] # don't descend further
605 dirs[:] = [] # don't descend further
606 elif followsym:
606 elif followsym:
607 newdirs = []
607 newdirs = []
608 for d in dirs:
608 for d in dirs:
609 fname = os.path.join(root, d)
609 fname = os.path.join(root, d)
610 if adddir(seen_dirs, fname):
610 if adddir(seen_dirs, fname):
611 if os.path.islink(fname):
611 if os.path.islink(fname):
612 for hgname in walkrepos(fname, True, seen_dirs):
612 for hgname in walkrepos(fname, True, seen_dirs):
613 yield hgname
613 yield hgname
614 else:
614 else:
615 newdirs.append(d)
615 newdirs.append(d)
616 dirs[:] = newdirs
616 dirs[:] = newdirs
617
617
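walkrepos() is the usual building block for server-side repository
discovery; an illustrative walk (root path invented):

    from mercurial import scmutil

    for root in scmutil.walkrepos('/srv/hg', followsym=True):
        print root      # each repo, plus any .hg/patches queue repos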
618 def osrcpath():
618 def osrcpath():
619 '''return default os-specific hgrc search path'''
619 '''return default os-specific hgrc search path'''
620 path = []
620 path = []
621 defaultpath = os.path.join(util.datapath, 'default.d')
621 defaultpath = os.path.join(util.datapath, 'default.d')
622 if os.path.isdir(defaultpath):
622 if os.path.isdir(defaultpath):
623 for f, kind in osutil.listdir(defaultpath):
623 for f, kind in osutil.listdir(defaultpath):
624 if f.endswith('.rc'):
624 if f.endswith('.rc'):
625 path.append(os.path.join(defaultpath, f))
625 path.append(os.path.join(defaultpath, f))
626 path.extend(systemrcpath())
626 path.extend(systemrcpath())
627 path.extend(userrcpath())
627 path.extend(userrcpath())
628 path = [os.path.normpath(f) for f in path]
628 path = [os.path.normpath(f) for f in path]
629 return path
629 return path
630
630
631 _rcpath = None
631 _rcpath = None
632
632
633 def rcpath():
633 def rcpath():
634 '''return hgrc search path. if env var HGRCPATH is set, use it.
634 '''return hgrc search path. if env var HGRCPATH is set, use it.
635 for each item in path, if directory, use files ending in .rc,
635 for each item in path, if directory, use files ending in .rc,
636 else use item.
636 else use item.
637 make HGRCPATH empty to only look in .hg/hgrc of current repo.
637 make HGRCPATH empty to only look in .hg/hgrc of current repo.
638 if no HGRCPATH, use default os-specific path.'''
638 if no HGRCPATH, use default os-specific path.'''
639 global _rcpath
639 global _rcpath
640 if _rcpath is None:
640 if _rcpath is None:
641 if 'HGRCPATH' in os.environ:
641 if 'HGRCPATH' in os.environ:
642 _rcpath = []
642 _rcpath = []
643 for p in os.environ['HGRCPATH'].split(os.pathsep):
643 for p in os.environ['HGRCPATH'].split(os.pathsep):
644 if not p:
644 if not p:
645 continue
645 continue
646 p = util.expandpath(p)
646 p = util.expandpath(p)
647 if os.path.isdir(p):
647 if os.path.isdir(p):
648 for f, kind in osutil.listdir(p):
648 for f, kind in osutil.listdir(p):
649 if f.endswith('.rc'):
649 if f.endswith('.rc'):
650 _rcpath.append(os.path.join(p, f))
650 _rcpath.append(os.path.join(p, f))
651 else:
651 else:
652 _rcpath.append(p)
652 _rcpath.append(p)
653 else:
653 else:
654 _rcpath = osrcpath()
654 _rcpath = osrcpath()
655 return _rcpath
655 return _rcpath
656
656
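For example (paths illustrative): HGRCPATH=/etc/mercurial:/home/u/extra.rc
reads every *.rc file under the directory plus the single file as-is; an
empty HGRCPATH restricts configuration to the current repository's
.hg/hgrc; and an unset HGRCPATH falls back to the os-specific osrcpath().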
657 def intrev(repo, rev):
657 def intrev(repo, rev):
658 """Return integer for a given revision that can be used in comparison or
658 """Return integer for a given revision that can be used in comparison or
659 arithmetic operation"""
659 arithmetic operation"""
660 if rev is None:
660 if rev is None:
661 return len(repo)
661 return len(repo)
662 return rev
662 return rev
663
663
664 def revsingle(repo, revspec, default='.'):
664 def revsingle(repo, revspec, default='.'):
665 if not revspec and revspec != 0:
665 if not revspec and revspec != 0:
666 return repo[default]
666 return repo[default]
667
667
668 l = revrange(repo, [revspec])
668 l = revrange(repo, [revspec])
669 if not l:
669 if not l:
670 raise util.Abort(_('empty revision set'))
670 raise util.Abort(_('empty revision set'))
671 return repo[l.last()]
671 return repo[l.last()]
672
672
673 def revpair(repo, revs):
673 def revpair(repo, revs):
674 if not revs:
674 if not revs:
675 return repo.dirstate.p1(), None
675 return repo.dirstate.p1(), None
676
676
677 l = revrange(repo, revs)
677 l = revrange(repo, revs)
678
678
679 if not l:
679 if not l:
680 first = second = None
680 first = second = None
681 elif l.isascending():
681 elif l.isascending():
682 first = l.min()
682 first = l.min()
683 second = l.max()
683 second = l.max()
684 elif l.isdescending():
684 elif l.isdescending():
685 first = l.max()
685 first = l.max()
686 second = l.min()
686 second = l.min()
687 else:
687 else:
688 first = l.first()
688 first = l.first()
689 second = l.last()
689 second = l.last()
690
690
691 if first is None:
691 if first is None:
692 raise util.Abort(_('empty revision range'))
692 raise util.Abort(_('empty revision range'))
693
693
694 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
694 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
695 return repo.lookup(first), None
695 return repo.lookup(first), None
696
696
697 return repo.lookup(first), repo.lookup(second)
697 return repo.lookup(first), repo.lookup(second)
698
698
699 _revrangesep = ':'
699 _revrangesep = ':'
700
700
701 def revrange(repo, revs):
701 def revrange(repo, revs):
702 """Yield revision as strings from a list of revision specifications."""
702 """Yield revision as strings from a list of revision specifications."""
703
703
704 def revfix(repo, val, defval):
704 def revfix(repo, val, defval):
705 if not val and val != 0 and defval is not None:
705 if not val and val != 0 and defval is not None:
706 return defval
706 return defval
707 return repo[val].rev()
707 return repo[val].rev()
708
708
709 seen, l = set(), revset.baseset([])
709 seen, l = set(), revset.baseset([])
710
710
711 revsetaliases = [alias for (alias, _) in
711 revsetaliases = [alias for (alias, _) in
712 repo.ui.configitems("revsetalias")]
712 repo.ui.configitems("revsetalias")]
713
713
714 for spec in revs:
714 for spec in revs:
715 if l and not seen:
715 if l and not seen:
716 seen = set(l)
716 seen = set(l)
717 # attempt to parse old-style ranges first to deal with
717 # attempt to parse old-style ranges first to deal with
718 # things like old-tag which contain query metacharacters
718 # things like old-tag which contain query metacharacters
719 try:
719 try:
720 # ... except for revset aliases without arguments. These
720 # ... except for revset aliases without arguments. These
721 # should be parsed as soon as possible, because they might
721 # should be parsed as soon as possible, because they might
722 # clash with a hash prefix.
722 # clash with a hash prefix.
723 if spec in revsetaliases:
723 if spec in revsetaliases:
724 raise error.RepoLookupError
724 raise error.RepoLookupError
725
725
726 if isinstance(spec, int):
726 if isinstance(spec, int):
727 seen.add(spec)
727 seen.add(spec)
728 l = l + revset.baseset([spec])
728 l = l + revset.baseset([spec])
729 continue
729 continue
730
730
731 if _revrangesep in spec:
731 if _revrangesep in spec:
732 start, end = spec.split(_revrangesep, 1)
732 start, end = spec.split(_revrangesep, 1)
733 if start in revsetaliases or end in revsetaliases:
733 if start in revsetaliases or end in revsetaliases:
734 raise error.RepoLookupError
734 raise error.RepoLookupError
735
735
736 start = revfix(repo, start, 0)
736 start = revfix(repo, start, 0)
737 end = revfix(repo, end, len(repo) - 1)
737 end = revfix(repo, end, len(repo) - 1)
738 if end == nullrev and start < 0:
738 if end == nullrev and start < 0:
739 start = nullrev
739 start = nullrev
740 rangeiter = repo.changelog.revs(start, end)
740 rangeiter = repo.changelog.revs(start, end)
741 if not seen and not l:
741 if not seen and not l:
742 # by far the most common case: revs = ["-1:0"]
742 # by far the most common case: revs = ["-1:0"]
743 l = revset.baseset(rangeiter)
743 l = revset.baseset(rangeiter)
744 # defer syncing seen until next iteration
744 # defer syncing seen until next iteration
745 continue
745 continue
746 newrevs = set(rangeiter)
746 newrevs = set(rangeiter)
747 if seen:
747 if seen:
748 newrevs.difference_update(seen)
748 newrevs.difference_update(seen)
749 seen.update(newrevs)
749 seen.update(newrevs)
750 else:
750 else:
751 seen = newrevs
751 seen = newrevs
752 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
752 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
753 continue
753 continue
754 elif spec and spec in repo: # single unquoted rev
754 elif spec and spec in repo: # single unquoted rev
755 rev = revfix(repo, spec, None)
755 rev = revfix(repo, spec, None)
756 if rev in seen:
756 if rev in seen:
757 continue
757 continue
758 seen.add(rev)
758 seen.add(rev)
759 l = l + revset.baseset([rev])
759 l = l + revset.baseset([rev])
760 continue
760 continue
761 except error.RepoLookupError:
761 except error.RepoLookupError:
762 pass
762 pass
763
763
764 # fall through to new-style queries if old-style fails
764 # fall through to new-style queries if old-style fails
765 m = revset.match(repo.ui, spec, repo)
765 m = revset.match(repo.ui, spec, repo)
766 if seen or l:
766 if seen or l:
767 dl = [r for r in m(repo) if r not in seen]
767 dl = [r for r in m(repo) if r not in seen]
768 l = l + revset.baseset(dl)
768 l = l + revset.baseset(dl)
769 seen.update(dl)
769 seen.update(dl)
770 else:
770 else:
771 l = m(repo)
771 l = m(repo)
772
772
773 return l
773 return l
774
774
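For orientation, here is a hedged usage sketch of revrange; the repository path and the revision specs are hypothetical. Old-style specs ("2:5") are tried first, and anything that fails old-style parsing falls through to the revset engine:

    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical repo
    for rev in scmutil.revrange(repo, ['2:5', 'tip', 'branch(default)']):
        print repo[rev]
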
def expandpats(pats):
    '''Expand bare globs when running on Windows.
    On POSIX we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

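A short illustration of the intent, with hypothetical patterns. On Windows the shell does not expand globs, so bare patterns are expanded in-process, while kind-prefixed patterns pass through untouched:

    # assuming util.expandglobs is True (Windows):
    #   '*.py'        -> whatever glob.glob('*.py') finds on disk
    #   're:.*\.pyc'  -> kept verbatim; the 're:' kind disables globbing
    pats = expandpats(['*.py', 're:.*\.pyc'])
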
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)

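As a usage sketch, the constructors above cover the common cases; `repo` is assumed to be an open localrepository, and matchers are callable, returning whether a path is selected:

    m = match(repo[None], pats=['glob:*.c'])    # working-directory context
    if m('main.c'):
        print 'main.c is selected'
    m = matchfiles(repo, ['a.txt', 'b.txt'])    # exactly these two files
    m = matchall(repo)                          # everything
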
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

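Roughly how 'hg addremove' drives this helper; this is a sketch, the exact call site lives in commands.py and may differ in detail:

    m = match(repo[None], pats=[])   # no patterns: consider everything
    # an explicitly passed similarity is a 0.0-1.0 fraction;
    # 'hg addremove -s 75' corresponds to 0.75
    addremove(repo, m, prefix='', opts={}, similarity=0.75)
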
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

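The branch ladder above keys on the dirstate code plus whether the file is still present on disk (st is the stat result, falsy when missing; the real code additionally path-audits unknown files). A standalone restatement of that decision table, for illustration only:

    def _classify(dstate, onpath):
        # dstate: '?' untracked, 'r' removed, 'a' added, 'n'/'m' tracked
        # onpath: whether the file currently exists on disk
        if dstate == '?' and onpath:
            return 'unknown'      # untracked file present: candidate add
        elif dstate != 'r' and not onpath:
            return 'deleted'      # tracked but gone: candidate remove
        elif dstate == 'r' and onpath:
            return 'forgotten'    # marked removed yet back on disk
        elif dstate == 'r' and not onpath:
            return 'removed'      # kept only to seed rename detection
        elif dstate == 'a':
            return 'added'

    assert _classify('?', True) == 'unknown'
    assert _classify('n', False) == 'deleted'
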
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

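similar.findrenames yields (removed, added, score) triples whose score is a 0.0-1.0 content-similarity fraction. A toy stand-in using difflib conveys the idea; the real implementation diffs file data blockwise and is considerably faster:

    import difflib

    def toy_score(olddata, newdata):
        # fraction of matching content: 0.0 disjoint, 1.0 identical
        return difflib.SequenceMatcher(None, olddata, newdata).ratio()

    score = toy_score('a\nb\nc\n', 'a\nb\nd\n')
    print 'candidate rename: %d%% similar' % (score * 100)
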
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons, it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements

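For reference, .hg/requires is a plain-text file with one feature name per line; a typical repository of this era contains:

    dotencode
    fncache
    revlogv1
    store

Any entry absent from `supported` triggers RequirementError, so clients lacking a feature fail fast rather than misreading the store.
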
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info in _filecache when first called.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fall back to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat it
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
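
A minimal usage sketch; the class and file contents are hypothetical, but the wiring (a _filecache dict plus a join method) mirrors how the decorated repository classes use filecache:

    import os

    class fakerepo(object):
        def __init__(self, root):
            self.root = root
            self._filecache = {}      # filecache keeps its stat entries here

        def join(self, fname):
            return os.path.join(self.root, '.hg', fname)

        @filecache('bookmarks')
        def bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return open(self.join('bookmarks')).read()

    repo = fakerepo('/path/to/repo')  # hypothetical repository root
    print repo.bookmarks              # first access stats and caches
    print repo.bookmarks              # served from the cache if unchanged
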
@@ -1,84 +1,82 @@
 
   $ cat << EOF > buggylocking.py
   > """A small extension that acquires locks in the wrong order
   > """
   >
   > from mercurial import cmdutil
   >
   > cmdtable = {}
   > command = cmdutil.command(cmdtable)
   >
   > @command('buggylocking', [], '')
   > def buggylocking(ui, repo):
   >     tr = repo.transaction('buggy')
   >     lo = repo.lock()
   >     wl = repo.wlock()
   >     wl.release()
   >     lo.release()
   >
   > @command('properlocking', [], '')
   > def properlocking(ui, repo):
   >     """check that reentrance is fine"""
   >     wl = repo.wlock()
   >     lo = repo.lock()
   >     tr = repo.transaction('proper')
   >     tr2 = repo.transaction('proper')
   >     lo2 = repo.lock()
   >     wl2 = repo.wlock()
   >     wl2.release()
   >     lo2.release()
   >     tr2.close()
   >     tr.close()
   >     lo.release()
   >     wl.release()
   > EOF
 
   $ cat << EOF >> $HGRCPATH
   > [extensions]
   > buggylocking=$TESTTMP/buggylocking.py
   > [devel]
   > all=1
   > EOF
 
   $ hg init lock-checker
   $ cd lock-checker
   $ hg buggylocking
   transaction with no lock
   "wlock" acquired after "lock"
   $ cat << EOF >> $HGRCPATH
   > [devel]
   > all=0
   > check-locks=1
   > EOF
   $ hg buggylocking
   transaction with no lock
   "wlock" acquired after "lock"
   $ hg buggylocking --traceback
-  transaction with no lock
-  at:
+  transaction with no lock at:
   */hg:* in * (glob)
   */mercurial/dispatch.py:* in run (glob)
   */mercurial/dispatch.py:* in dispatch (glob)
   */mercurial/dispatch.py:* in _runcatch (glob)
   */mercurial/dispatch.py:* in _dispatch (glob)
   */mercurial/dispatch.py:* in runcommand (glob)
   */mercurial/dispatch.py:* in _runcommand (glob)
   */mercurial/dispatch.py:* in checkargs (glob)
   */mercurial/dispatch.py:* in <lambda> (glob)
   */mercurial/util.py:* in check (glob)
   $TESTTMP/buggylocking.py:* in buggylocking (glob)
-  "wlock" acquired after "lock"
-  at:
+  "wlock" acquired after "lock" at:
   */hg:* in * (glob)
   */mercurial/dispatch.py:* in run (glob)
   */mercurial/dispatch.py:* in dispatch (glob)
   */mercurial/dispatch.py:* in _runcatch (glob)
   */mercurial/dispatch.py:* in _dispatch (glob)
   */mercurial/dispatch.py:* in runcommand (glob)
   */mercurial/dispatch.py:* in _runcommand (glob)
   */mercurial/dispatch.py:* in checkargs (glob)
   */mercurial/dispatch.py:* in <lambda> (glob)
   */mercurial/util.py:* in check (glob)
   $TESTTMP/buggylocking.py:* in buggylocking (glob)
   $ hg properlocking
   $ cd ..
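
The hunk above moves the 'at:' suffix onto the warning line itself: the warning helper now decides how the line ends instead of its callers. For illustration only, a minimal self-contained sketch of a develwarn-style helper that behaves this way; this is not Mercurial's actual implementation:

    import sys
    import traceback

    def develwarn(msg, out=sys.stderr, tracebackflag=False):
        # the helper owns the end of the line: with tracebacks enabled it
        # appends ' at:' plus the caller's stack, otherwise just a newline
        if tracebackflag:
            out.write(msg + ' at:\n')
            traceback.print_stack(sys._getframe(1), file=out)
        else:
            out.write(msg + '\n')

    develwarn('transaction with no lock')
    develwarn('"wlock" acquired after "lock"', tracebackflag=True)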