commit: remove dead initialization of 'lock'...
Martin von Zweigbergk
r22908:71570f31 default
@@ -1,1787 +1,1787 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

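# Editor's note: a minimal sketch (not part of localrepo.py) of how the
# caching helpers above behave; `repo` is a hypothetical localrepository.
# repofilecache resolves against repo.unfiltered(), so every filtered
# view shares one cache entry with the plain repo.
def _example_unfiltered_caches(repo):
    served = repo.filtered('served')
    assert served._bookmarks is repo.unfiltered()._bookmarks
    # hasunfilteredcache tells whether the value was computed already
    print hasunfilteredcache(repo, '_bookmarks')
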
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

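# Editor's note: an illustrative sketch (not part of localrepo.py) of
# driving localpeer through the generic peer API; `repo` is hypothetical.
def _example_localpeer(repo):
    p = localpeer(repo)               # what repo.peer() returns
    node = p.lookup('tip')            # proxied to the underlying repo
    print p.known([node])             # [True] unless 'tip' is secret
    print p.listkeys('namespaces')    # pushkey namespaces served locally
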
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

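# Editor's note: a minimal construction sketch (not part of localrepo.py).
# Callers normally go through hg.repository() instead of instantiating
# localrepository directly; the path below is hypothetical.
def _example_open_repo():
    import ui as uimod
    repo = localrepository(uimod.ui(), '/path/to/repo')
    print repo.root            # working directory root
    print repo.requirements    # e.g. set(['revlogv1', 'store', 'fncache'])
    return repo
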
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

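    # Editor's note: for illustration (not part of localrepo.py), the
    # .hg/requires file written by _writerequirements above holds one
    # requirement per line, sorted, e.g.:
    #
    #   dotencode
    #   fncache
    #   revlogv1
    #   store
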
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

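    # Editor's note: a short sketch (not part of localrepo.py) of the
    # filtering API above; `repo` is a hypothetical repository.
    #
    #   visible = repo.filtered('visible')    # hides obsolete changesets
    #   served = repo.filtered('served')      # additionally hides secret ones
    #   assert visible.unfiltered() is repo.unfiltered()
    #   print visible.filtername, served.filtername
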
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        store = obsolete.obsstore(self.sopener, **kwargs)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

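    # Editor's note: a brief sketch (not part of localrepo.py) of the
    # revset helpers above; `repo` is a hypothetical repository.
    #
    #   # revs() returns revision numbers; %s is expanded by formatspec
    #   for rev in repo.revs('ancestors(%s) and not public()', 'tip'):
    #       print rev
    #   # set() yields changectx objects instead
    #   for ctx in repo.set('heads(all())'):
    #       print ctx.hex(), ctx.branch()
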
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

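    # Editor's note: an illustrative sketch (not part of localrepo.py) of
    # the tagging API above; the repo, tag name, and user are hypothetical.
    #
    #   node = repo['tip'].node()
    #   # local=True writes .hg/localtags and creates no commit
    #   repo.tag('snapshot', node, 'tagging tip', True,
    #            'editor <editor@example.com>', None)
    #   print repo.tagtype('snapshot')    # -> 'local'
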
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

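    # Editor's note: a small sketch (not part of localrepo.py) of the
    # branch APIs above; `repo` is a hypothetical repository.
    #
    #   bm = repo.branchmap()             # {branch: [branchheads]}
    #   for branch in bm:
    #       print branch, hex(repo.branchtip(branch))
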
    def join(self, f, *insidef):
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

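    # Editor's note: for illustration (not part of localrepo.py), the
    # filters resolved by _loadfilter come from hgrc sections such as:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    # A hypothetical caller then sees filtered working-copy data:
    #
    #   print repo._loadfilter('encode')  # [(matcher, filterfn, params), ...]
    #   print repo.wread('archive.gz')    # data piped through gunzip
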
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

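    # Example (illustrative, following the "%d\n%s\n" format written by
    # _writejournal above): after committing in a repository that previously
    # held 42 changesets, .hg/undo.desc would contain:
    #
    #     42
    #     commit
    #
    # which _rollback() parses back into (oldlen, desc, detail).
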
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

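    # Example (hgrc sketch): the blocking retry above honours ui.timeout,
    # defaulting to 600 seconds when unset.
    #
    #     [ui]
    #     timeout = 600
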
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

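    # Example (illustrative convention, mirroring rollback() above): when
    # both locks are needed, wlock is acquired before lock so concurrent
    # hg processes cannot deadlock on the pair.
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...  # modify the store and working copy metadata
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
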
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

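    # Example (hypothetical caller sketch): committing all working-directory
    # changes with an explicit user and date; commit() returns None when
    # there is nothing to commit.
    #
    #     node = repo.commit(text='fix a bug',
    #                        user='Jane Doe <jane@example.com>',
    #                        date='2014-08-15 12:00')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
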
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            new[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                      trp, changed)
                            m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), new, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

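    # Example (hgrc sketch): the 'pretxncommit' hook fired above runs while
    # the transaction is still open; a non-zero exit status aborts and rolls
    # back the commit. The script path below is hypothetical.
    #
    #     [hooks]
    #     pretxncommit.check = /path/to/check-commit
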
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

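    # Example (illustrative): the returned status is a tuple of file lists
    # in the order (modified, added, removed, deleted, unknown, ignored,
    # clean), matching the changes[0]..changes[6] indexing in commit() above.
    #
    #     modified, added, removed = repo.status()[:3]
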
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk the first-parent chain from top towards bottom, sampling
            # nodes at exponentially increasing distances (1, 2, 4, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

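    # Example (illustrative): on a linear history with top at rev 100 and
    # bottom at rev 0, the sampled nodes sit at distances 1, 2, 4, ... from
    # top, i.e. revs 99, 98, 96, 92, 84, 68 and 36. The legacy wire
    # protocol's 'between' command uses this sampling during discovery.
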
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

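    # Example (illustrative): the stream_out payload parsed above starts
    # with a status code, then a "<filecount> <bytecount>" line, then each
    # file as a "<name>\0<size>" header followed by exactly <size> bytes of
    # raw revlog data:
    #
    #     0
    #     2 12345
    #     data/foo.i\0<size>
    #     ...raw bytes...
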
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

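    # Example (illustrative): bookmarks and phases travel over pushkey
    # namespaces; moving a bookmark looks roughly like the call below,
    # where the hex node arguments are placeholders.
    #
    #     repo.pushkey('bookmarks', 'mybook', oldnode_hex, newnode_hex)
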
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
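
# Example (illustrative): undoname() maps journal file names to their
# post-transaction "undo" counterparts:
#
#     undoname('journal.dirstate')  -> 'undo.dirstate'
#     undoname('.hg/store/journal') -> '.hg/store/undo'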