##// END OF EJS Templates
localrepo: access status fields by name rather than index
Martin von Zweigbergk -
r22928:5e5d297c default
parent child Browse files
Show More
@@ -1,1789 +1,1790 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
class repofilecache(filecache):
    """Filecache variant that always operates on the unfiltered repo.

    All filecache usage on a repo is for logic that must ignore
    repoview filtering, so every descriptor access is redirected to
    repo.unfiltered().
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
34
34
class storecache(repofilecache):
    """A repofilecache whose tracked file lives in the store (.hg/store)."""

    def join(self, obj, fname):
        # resolve fname relative to the store rather than .hg/ itself
        return obj.sjoin(fname)
39
39
class unfilteredpropertycache(propertycache):
    """propertycache that is computed and cached on the unfiltered repo only.

    When accessed through a filtered view, delegate to the attribute on
    the unfiltered repo so the value is computed and stored there once.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # filtered view: reuse (or trigger) the cache on the
            # unfiltered repo instead of caching a second copy here
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
48
48
class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account.

    The value is cached on the instance it was computed for, so each
    filtered view keeps its own copy.
    """

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override when storing the cached value
        object.__setattr__(obj, self.name, value)
54
54
55
55
def hasunfilteredcache(repo, name):
    """Tell whether <name> is already cached on the unfiltered repo."""
    unfi = repo.unfiltered()
    return name in vars(unfi)
59
59
def unfilteredmethod(orig):
    """Decorate a method so it always runs against the unfiltered repo."""
    def wrapper(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
65
65
# capabilities advertised by modern local peers
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# legacy peers additionally support the old changegroupsubset protocol
legacycaps = moderncaps.union(set(['changegroupsubset']))
69
69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # always talk to the 'served' view of the repo, as a remote would
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        bundle = exchange.getbundle(self._repo, source, heads=heads,
                                    common=common, bundlecaps=bundlecaps,
                                    **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to
            # make the wire level function happier. We need to build a
            # proper object from it in local peer.
            bundle = bundle2.unbundle20(self.ui, bundle)
        return bundle

    # TODO We might want to move the next two calls into legacypeer and
    # add unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when
                # the API is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests
    with restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads,
                                             source)
173
173
class localrepository(object):
    """A repository backed by a local .hg directory.

    NOTE(review): only the class head and class-level constants are
    documented here; the method bodies follow below.
    """

    # requirements that change how revlog data is stored on disk
    supportedformats = set(('revlogv1', 'generaldelta'))
    # every requirement this class can open (format + layout features)
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that are translated into store-opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # default requirements written for newly created repositories
    requirements = ['revlogv1']
    # name of the repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
186
186
187 def _baserequirements(self, create):
187 def _baserequirements(self, create):
188 return self.requirements[:]
188 return self.requirements[:]
189
189
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path.

        baseui is copied so per-repo configuration does not leak back into
        the caller's ui. Raises error.RepoError when the repository is
        missing (create=False) or already exists (create=True).
        """
        # working-directory vfs and derived paths
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # .hg/ vfs
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo configuration; a missing hgrc is not an error
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions owned by enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as an empty requirement set
                requirements = set()

        # honor .hg/sharedpath indirection (shared repositories)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
299
299
300 def close(self):
300 def close(self):
301 pass
301 pass
302
302
303 def _restrictcapabilities(self, caps):
303 def _restrictcapabilities(self, caps):
304 # bundle2 is not ready for prime time, drop it unless explicitly
304 # bundle2 is not ready for prime time, drop it unless explicitly
305 # required by the tests (or some brave tester)
305 # required by the tests (or some brave tester)
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 caps = set(caps)
307 caps = set(caps)
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 return caps
310 return caps
311
311
312 def _applyrequirements(self, requirements):
312 def _applyrequirements(self, requirements):
313 self.requirements = requirements
313 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
314 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
315 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
317 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
318 self.sopener.options['chunkcachesize'] = chunkcachesize
319
319
320 def _writerequirements(self):
320 def _writerequirements(self):
321 reqfile = self.opener("requires", "w")
321 reqfile = self.opener("requires", "w")
322 for r in sorted(self.requirements):
322 for r in sorted(self.requirements):
323 reqfile.write("%s\n" % r)
323 reqfile.write("%s\n" % r)
324 reqfile.close()
324 reqfile.close()
325
325
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is exactly a registered subrepo: legal
                    return True
                else:
                    # a subrepo prefix matched: delegate the remainder of
                    # the path to that subrepo's own nesting check
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try a shorter prefix
                parts.pop()
        return False
363
363
364 def peer(self):
364 def peer(self):
365 return localpeer(self) # not cached to avoid reference cycle
365 return localpeer(self) # not cached to avoid reference cycle
366
366
367 def unfiltered(self):
367 def unfiltered(self):
368 """Return unfiltered version of the repository
368 """Return unfiltered version of the repository
369
369
370 Intended to be overwritten by filtered repo."""
370 Intended to be overwritten by filtered repo."""
371 return self
371 return self
372
372
373 def filtered(self, name):
373 def filtered(self, name):
374 """Return a filtered version of a repository"""
374 """Return a filtered version of a repository"""
375 # build a new class with the mixin and the current class
375 # build a new class with the mixin and the current class
376 # (possibly subclass of the repo)
376 # (possibly subclass of the repo)
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 pass
378 pass
379 return proxycls(self, name)
379 return proxycls(self, name)
380
380
381 @repofilecache('bookmarks')
381 @repofilecache('bookmarks')
382 def _bookmarks(self):
382 def _bookmarks(self):
383 return bookmarks.bmstore(self)
383 return bookmarks.bmstore(self)
384
384
385 @repofilecache('bookmarks.current')
385 @repofilecache('bookmarks.current')
386 def _bookmarkcurrent(self):
386 def _bookmarkcurrent(self):
387 return bookmarks.readcurrent(self)
387 return bookmarks.readcurrent(self)
388
388
389 def bookmarkheads(self, bookmark):
389 def bookmarkheads(self, bookmark):
390 name = bookmark.split('@', 1)[0]
390 name = bookmark.split('@', 1)[0]
391 heads = []
391 heads = []
392 for mark, n in self._bookmarks.iteritems():
392 for mark, n in self._bookmarks.iteritems():
393 if mark.split('@', 1)[0] == name:
393 if mark.split('@', 1)[0] == name:
394 heads.append(n)
394 heads.append(n)
395 return heads
395 return heads
396
396
397 @storecache('phaseroots')
397 @storecache('phaseroots')
398 def _phasecache(self):
398 def _phasecache(self):
399 return phases.phasecache(self, self._phasedefaults)
399 return phases.phasecache(self, self._phasedefaults)
400
400
401 @storecache('obsstore')
401 @storecache('obsstore')
402 def obsstore(self):
402 def obsstore(self):
403 # read default format for new obsstore.
403 # read default format for new obsstore.
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 # rely on obsstore class default when possible.
405 # rely on obsstore class default when possible.
406 kwargs = {}
406 kwargs = {}
407 if defaultformat is not None:
407 if defaultformat is not None:
408 defaultformat['defaultformat'] = defaultformat
408 defaultformat['defaultformat'] = defaultformat
409 store = obsolete.obsstore(self.sopener, **kwargs)
409 store = obsolete.obsstore(self.sopener, **kwargs)
410 if store and not obsolete._enabled:
410 if store and not obsolete._enabled:
411 # message is rare enough to not be translated
411 # message is rare enough to not be translated
412 msg = 'obsolete feature not enabled but %i markers found!\n'
412 msg = 'obsolete feature not enabled but %i markers found!\n'
413 self.ui.warn(msg % len(list(store)))
413 self.ui.warn(msg % len(list(store)))
414 return store
414 return store
415
415
416 @storecache('00changelog.i')
416 @storecache('00changelog.i')
417 def changelog(self):
417 def changelog(self):
418 c = changelog.changelog(self.sopener)
418 c = changelog.changelog(self.sopener)
419 if 'HG_PENDING' in os.environ:
419 if 'HG_PENDING' in os.environ:
420 p = os.environ['HG_PENDING']
420 p = os.environ['HG_PENDING']
421 if p.startswith(self.root):
421 if p.startswith(self.root):
422 c.readpending('00changelog.i.a')
422 c.readpending('00changelog.i.a')
423 return c
423 return c
424
424
425 @storecache('00manifest.i')
425 @storecache('00manifest.i')
426 def manifest(self):
426 def manifest(self):
427 return manifest.manifest(self.sopener)
427 return manifest.manifest(self.sopener)
428
428
429 @repofilecache('dirstate')
429 @repofilecache('dirstate')
430 def dirstate(self):
430 def dirstate(self):
431 warned = [0]
431 warned = [0]
432 def validate(node):
432 def validate(node):
433 try:
433 try:
434 self.changelog.rev(node)
434 self.changelog.rev(node)
435 return node
435 return node
436 except error.LookupError:
436 except error.LookupError:
437 if not warned[0]:
437 if not warned[0]:
438 warned[0] = True
438 warned[0] = True
439 self.ui.warn(_("warning: ignoring unknown"
439 self.ui.warn(_("warning: ignoring unknown"
440 " working parent %s!\n") % short(node))
440 " working parent %s!\n") % short(node))
441 return nullid
441 return nullid
442
442
443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444
444
445 def __getitem__(self, changeid):
445 def __getitem__(self, changeid):
446 if changeid is None:
446 if changeid is None:
447 return context.workingctx(self)
447 return context.workingctx(self)
448 return context.changectx(self, changeid)
448 return context.changectx(self, changeid)
449
449
450 def __contains__(self, changeid):
450 def __contains__(self, changeid):
451 try:
451 try:
452 return bool(self.lookup(changeid))
452 return bool(self.lookup(changeid))
453 except error.RepoLookupError:
453 except error.RepoLookupError:
454 return False
454 return False
455
455
456 def __nonzero__(self):
456 def __nonzero__(self):
457 return True
457 return True
458
458
459 def __len__(self):
459 def __len__(self):
460 return len(self.changelog)
460 return len(self.changelog)
461
461
462 def __iter__(self):
462 def __iter__(self):
463 return iter(self.changelog)
463 return iter(self.changelog)
464
464
465 def revs(self, expr, *args):
465 def revs(self, expr, *args):
466 '''Return a list of revisions matching the given revset'''
466 '''Return a list of revisions matching the given revset'''
467 expr = revset.formatspec(expr, *args)
467 expr = revset.formatspec(expr, *args)
468 m = revset.match(None, expr)
468 m = revset.match(None, expr)
469 return m(self, revset.spanset(self))
469 return m(self, revset.spanset(self))
470
470
471 def set(self, expr, *args):
471 def set(self, expr, *args):
472 '''
472 '''
473 Yield a context for each matching revision, after doing arg
473 Yield a context for each matching revision, after doing arg
474 replacement via revset.formatspec
474 replacement via revset.formatspec
475 '''
475 '''
476 for r in self.revs(expr, *args):
476 for r in self.revs(expr, *args):
477 yield self[r]
477 yield self[r]
478
478
479 def url(self):
479 def url(self):
480 return 'file:' + self.root
480 return 'file:' + self.root
481
481
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        name is the hook name; throw=True propagates hook failure as an
        exception; **args are forwarded to the hook environment.
        """
        return hook.hook(self.ui, self, name, throw, **args)
490
490
491 @unfilteredmethod
491 @unfilteredmethod
492 def _tag(self, names, node, message, local, user, date, extra={},
492 def _tag(self, names, node, message, local, user, date, extra={},
493 editor=False):
493 editor=False):
494 if isinstance(names, str):
494 if isinstance(names, str):
495 names = (names,)
495 names = (names,)
496
496
497 branches = self.branchmap()
497 branches = self.branchmap()
498 for name in names:
498 for name in names:
499 self.hook('pretag', throw=True, node=hex(node), tag=name,
499 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 local=local)
500 local=local)
501 if name in branches:
501 if name in branches:
502 self.ui.warn(_("warning: tag %s conflicts with existing"
502 self.ui.warn(_("warning: tag %s conflicts with existing"
503 " branch name\n") % name)
503 " branch name\n") % name)
504
504
505 def writetags(fp, names, munge, prevtags):
505 def writetags(fp, names, munge, prevtags):
506 fp.seek(0, 2)
506 fp.seek(0, 2)
507 if prevtags and prevtags[-1] != '\n':
507 if prevtags and prevtags[-1] != '\n':
508 fp.write('\n')
508 fp.write('\n')
509 for name in names:
509 for name in names:
510 m = munge and munge(name) or name
510 m = munge and munge(name) or name
511 if (self._tagscache.tagtypes and
511 if (self._tagscache.tagtypes and
512 name in self._tagscache.tagtypes):
512 name in self._tagscache.tagtypes):
513 old = self.tags().get(name, nullid)
513 old = self.tags().get(name, nullid)
514 fp.write('%s %s\n' % (hex(old), m))
514 fp.write('%s %s\n' % (hex(old), m))
515 fp.write('%s %s\n' % (hex(node), m))
515 fp.write('%s %s\n' % (hex(node), m))
516 fp.close()
516 fp.close()
517
517
518 prevtags = ''
518 prevtags = ''
519 if local:
519 if local:
520 try:
520 try:
521 fp = self.opener('localtags', 'r+')
521 fp = self.opener('localtags', 'r+')
522 except IOError:
522 except IOError:
523 fp = self.opener('localtags', 'a')
523 fp = self.opener('localtags', 'a')
524 else:
524 else:
525 prevtags = fp.read()
525 prevtags = fp.read()
526
526
527 # local tags are stored in the current charset
527 # local tags are stored in the current charset
528 writetags(fp, names, None, prevtags)
528 writetags(fp, names, None, prevtags)
529 for name in names:
529 for name in names:
530 self.hook('tag', node=hex(node), tag=name, local=local)
530 self.hook('tag', node=hex(node), tag=name, local=local)
531 return
531 return
532
532
533 try:
533 try:
534 fp = self.wfile('.hgtags', 'rb+')
534 fp = self.wfile('.hgtags', 'rb+')
535 except IOError, e:
535 except IOError, e:
536 if e.errno != errno.ENOENT:
536 if e.errno != errno.ENOENT:
537 raise
537 raise
538 fp = self.wfile('.hgtags', 'ab')
538 fp = self.wfile('.hgtags', 'ab')
539 else:
539 else:
540 prevtags = fp.read()
540 prevtags = fp.read()
541
541
542 # committed tags are stored in UTF-8
542 # committed tags are stored in UTF-8
543 writetags(fp, names, encoding.fromlocal, prevtags)
543 writetags(fp, names, encoding.fromlocal, prevtags)
544
544
545 fp.close()
545 fp.close()
546
546
547 self.invalidatecaches()
547 self.invalidatecaches()
548
548
549 if '.hgtags' not in self.dirstate:
549 if '.hgtags' not in self.dirstate:
550 self[None].add(['.hgtags'])
550 self[None].add(['.hgtags'])
551
551
552 m = matchmod.exact(self.root, '', ['.hgtags'])
552 m = matchmod.exact(self.root, '', ['.hgtags'])
553 tagnode = self.commit(message, user, date, extra=extra, match=m,
553 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 editor=editor)
554 editor=editor)
555
555
556 for name in names:
556 for name in names:
557 self.hook('tag', node=hex(node), tag=name, local=local)
557 self.hook('tag', node=hex(node), tag=name, local=local)
558
558
559 return tagnode
559 return tagnode
560
560
561 def tag(self, names, node, message, local, user, date, editor=False):
561 def tag(self, names, node, message, local, user, date, editor=False):
562 '''tag a revision with one or more symbolic names.
562 '''tag a revision with one or more symbolic names.
563
563
564 names is a list of strings or, when adding a single tag, names may be a
564 names is a list of strings or, when adding a single tag, names may be a
565 string.
565 string.
566
566
567 if local is True, the tags are stored in a per-repository file.
567 if local is True, the tags are stored in a per-repository file.
568 otherwise, they are stored in the .hgtags file, and a new
568 otherwise, they are stored in the .hgtags file, and a new
569 changeset is committed with the change.
569 changeset is committed with the change.
570
570
571 keyword arguments:
571 keyword arguments:
572
572
573 local: whether to store tags in non-version-controlled file
573 local: whether to store tags in non-version-controlled file
574 (default False)
574 (default False)
575
575
576 message: commit message to use if committing
576 message: commit message to use if committing
577
577
578 user: name of user to use if committing
578 user: name of user to use if committing
579
579
580 date: date tuple to use if committing'''
580 date: date tuple to use if committing'''
581
581
582 if not local:
582 if not local:
583 m = matchmod.exact(self.root, '', ['.hgtags'])
583 m = matchmod.exact(self.root, '', ['.hgtags'])
584 if util.any(self.status(match=m, unknown=True, ignored=True)):
584 if util.any(self.status(match=m, unknown=True, ignored=True)):
585 raise util.Abort(_('working copy of .hgtags is changed'),
585 raise util.Abort(_('working copy of .hgtags is changed'),
586 hint=_('please commit .hgtags manually'))
586 hint=_('please commit .hgtags manually'))
587
587
588 self.tags() # instantiate the cache
588 self.tags() # instantiate the cache
589 self._tag(names, node, message, local, user, date, editor=editor)
589 self._tag(names, node, message, local, user, date, editor=editor)
590
590
591 @filteredpropertycache
591 @filteredpropertycache
592 def _tagscache(self):
592 def _tagscache(self):
593 '''Returns a tagscache object that contains various tags related
593 '''Returns a tagscache object that contains various tags related
594 caches.'''
594 caches.'''
595
595
596 # This simplifies its cache management by having one decorated
596 # This simplifies its cache management by having one decorated
597 # function (this one) and the rest simply fetch things from it.
597 # function (this one) and the rest simply fetch things from it.
598 class tagscache(object):
598 class tagscache(object):
599 def __init__(self):
599 def __init__(self):
600 # These two define the set of tags for this repository. tags
600 # These two define the set of tags for this repository. tags
601 # maps tag name to node; tagtypes maps tag name to 'global' or
601 # maps tag name to node; tagtypes maps tag name to 'global' or
602 # 'local'. (Global tags are defined by .hgtags across all
602 # 'local'. (Global tags are defined by .hgtags across all
603 # heads, and local tags are defined in .hg/localtags.)
603 # heads, and local tags are defined in .hg/localtags.)
604 # They constitute the in-memory cache of tags.
604 # They constitute the in-memory cache of tags.
605 self.tags = self.tagtypes = None
605 self.tags = self.tagtypes = None
606
606
607 self.nodetagscache = self.tagslist = None
607 self.nodetagscache = self.tagslist = None
608
608
609 cache = tagscache()
609 cache = tagscache()
610 cache.tags, cache.tagtypes = self._findtags()
610 cache.tags, cache.tagtypes = self._findtags()
611
611
612 return cache
612 return cache
613
613
614 def tags(self):
614 def tags(self):
615 '''return a mapping of tag to node'''
615 '''return a mapping of tag to node'''
616 t = {}
616 t = {}
617 if self.changelog.filteredrevs:
617 if self.changelog.filteredrevs:
618 tags, tt = self._findtags()
618 tags, tt = self._findtags()
619 else:
619 else:
620 tags = self._tagscache.tags
620 tags = self._tagscache.tags
621 for k, v in tags.iteritems():
621 for k, v in tags.iteritems():
622 try:
622 try:
623 # ignore tags to unknown nodes
623 # ignore tags to unknown nodes
624 self.changelog.rev(v)
624 self.changelog.rev(v)
625 t[k] = v
625 t[k] = v
626 except (error.LookupError, ValueError):
626 except (error.LookupError, ValueError):
627 pass
627 pass
628 return t
628 return t
629
629
630 def _findtags(self):
630 def _findtags(self):
631 '''Do the hard work of finding tags. Return a pair of dicts
631 '''Do the hard work of finding tags. Return a pair of dicts
632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 maps tag name to a string like \'global\' or \'local\'.
633 maps tag name to a string like \'global\' or \'local\'.
634 Subclasses or extensions are free to add their own tags, but
634 Subclasses or extensions are free to add their own tags, but
635 should be aware that the returned dicts will be retained for the
635 should be aware that the returned dicts will be retained for the
636 duration of the localrepo object.'''
636 duration of the localrepo object.'''
637
637
638 # XXX what tagtype should subclasses/extensions use? Currently
638 # XXX what tagtype should subclasses/extensions use? Currently
639 # mq and bookmarks add tags, but do not set the tagtype at all.
639 # mq and bookmarks add tags, but do not set the tagtype at all.
640 # Should each extension invent its own tag type? Should there
640 # Should each extension invent its own tag type? Should there
641 # be one tagtype for all such "virtual" tags? Or is the status
641 # be one tagtype for all such "virtual" tags? Or is the status
642 # quo fine?
642 # quo fine?
643
643
644 alltags = {} # map tag name to (node, hist)
644 alltags = {} # map tag name to (node, hist)
645 tagtypes = {}
645 tagtypes = {}
646
646
647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649
649
650 # Build the return dicts. Have to re-encode tag names because
650 # Build the return dicts. Have to re-encode tag names because
651 # the tags module always uses UTF-8 (in order not to lose info
651 # the tags module always uses UTF-8 (in order not to lose info
652 # writing to the cache), but the rest of Mercurial wants them in
652 # writing to the cache), but the rest of Mercurial wants them in
653 # local encoding.
653 # local encoding.
654 tags = {}
654 tags = {}
655 for (name, (node, hist)) in alltags.iteritems():
655 for (name, (node, hist)) in alltags.iteritems():
656 if node != nullid:
656 if node != nullid:
657 tags[encoding.tolocal(name)] = node
657 tags[encoding.tolocal(name)] = node
658 tags['tip'] = self.changelog.tip()
658 tags['tip'] = self.changelog.tip()
659 tagtypes = dict([(encoding.tolocal(name), value)
659 tagtypes = dict([(encoding.tolocal(name), value)
660 for (name, value) in tagtypes.iteritems()])
660 for (name, value) in tagtypes.iteritems()])
661 return (tags, tagtypes)
661 return (tags, tagtypes)
662
662
663 def tagtype(self, tagname):
663 def tagtype(self, tagname):
664 '''
664 '''
665 return the type of the given tag. result can be:
665 return the type of the given tag. result can be:
666
666
667 'local' : a local tag
667 'local' : a local tag
668 'global' : a global tag
668 'global' : a global tag
669 None : tag does not exist
669 None : tag does not exist
670 '''
670 '''
671
671
672 return self._tagscache.tagtypes.get(tagname)
672 return self._tagscache.tagtypes.get(tagname)
673
673
674 def tagslist(self):
674 def tagslist(self):
675 '''return a list of tags ordered by revision'''
675 '''return a list of tags ordered by revision'''
676 if not self._tagscache.tagslist:
676 if not self._tagscache.tagslist:
677 l = []
677 l = []
678 for t, n in self.tags().iteritems():
678 for t, n in self.tags().iteritems():
679 l.append((self.changelog.rev(n), t, n))
679 l.append((self.changelog.rev(n), t, n))
680 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
680 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
681
681
682 return self._tagscache.tagslist
682 return self._tagscache.tagslist
683
683
684 def nodetags(self, node):
684 def nodetags(self, node):
685 '''return the tags associated with a node'''
685 '''return the tags associated with a node'''
686 if not self._tagscache.nodetagscache:
686 if not self._tagscache.nodetagscache:
687 nodetagscache = {}
687 nodetagscache = {}
688 for t, n in self._tagscache.tags.iteritems():
688 for t, n in self._tagscache.tags.iteritems():
689 nodetagscache.setdefault(n, []).append(t)
689 nodetagscache.setdefault(n, []).append(t)
690 for tags in nodetagscache.itervalues():
690 for tags in nodetagscache.itervalues():
691 tags.sort()
691 tags.sort()
692 self._tagscache.nodetagscache = nodetagscache
692 self._tagscache.nodetagscache = nodetagscache
693 return self._tagscache.nodetagscache.get(node, [])
693 return self._tagscache.nodetagscache.get(node, [])
694
694
695 def nodebookmarks(self, node):
695 def nodebookmarks(self, node):
696 marks = []
696 marks = []
697 for bookmark, n in self._bookmarks.iteritems():
697 for bookmark, n in self._bookmarks.iteritems():
698 if n == node:
698 if n == node:
699 marks.append(bookmark)
699 marks.append(bookmark)
700 return sorted(marks)
700 return sorted(marks)
701
701
702 def branchmap(self):
702 def branchmap(self):
703 '''returns a dictionary {branch: [branchheads]} with branchheads
703 '''returns a dictionary {branch: [branchheads]} with branchheads
704 ordered by increasing revision number'''
704 ordered by increasing revision number'''
705 branchmap.updatecache(self)
705 branchmap.updatecache(self)
706 return self._branchcaches[self.filtername]
706 return self._branchcaches[self.filtername]
707
707
708 def branchtip(self, branch):
708 def branchtip(self, branch):
709 '''return the tip node for a given branch'''
709 '''return the tip node for a given branch'''
710 try:
710 try:
711 return self.branchmap().branchtip(branch)
711 return self.branchmap().branchtip(branch)
712 except KeyError:
712 except KeyError:
713 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
713 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
714
714
715 def lookup(self, key):
715 def lookup(self, key):
716 return self[key].node()
716 return self[key].node()
717
717
718 def lookupbranch(self, key, remote=None):
718 def lookupbranch(self, key, remote=None):
719 repo = remote or self
719 repo = remote or self
720 if key in repo.branchmap():
720 if key in repo.branchmap():
721 return key
721 return key
722
722
723 repo = (remote and remote.local()) and remote or self
723 repo = (remote and remote.local()) and remote or self
724 return repo[key].branch()
724 return repo[key].branch()
725
725
726 def known(self, nodes):
726 def known(self, nodes):
727 nm = self.changelog.nodemap
727 nm = self.changelog.nodemap
728 pc = self._phasecache
728 pc = self._phasecache
729 result = []
729 result = []
730 for n in nodes:
730 for n in nodes:
731 r = nm.get(n)
731 r = nm.get(n)
732 resp = not (r is None or pc.phase(self, r) >= phases.secret)
732 resp = not (r is None or pc.phase(self, r) >= phases.secret)
733 result.append(resp)
733 result.append(resp)
734 return result
734 return result
735
735
736 def local(self):
736 def local(self):
737 return self
737 return self
738
738
739 def cancopy(self):
739 def cancopy(self):
740 # so statichttprepo's override of local() works
740 # so statichttprepo's override of local() works
741 if not self.local():
741 if not self.local():
742 return False
742 return False
743 if not self.ui.configbool('phases', 'publish', True):
743 if not self.ui.configbool('phases', 'publish', True):
744 return True
744 return True
745 # if publishing we can't copy if there is filtered content
745 # if publishing we can't copy if there is filtered content
746 return not self.filtered('visible').changelog.filteredrevs
746 return not self.filtered('visible').changelog.filteredrevs
747
747
748 def join(self, f, *insidef):
748 def join(self, f, *insidef):
749 return os.path.join(self.path, f, *insidef)
749 return os.path.join(self.path, f, *insidef)
750
750
751 def wjoin(self, f, *insidef):
751 def wjoin(self, f, *insidef):
752 return os.path.join(self.root, f, *insidef)
752 return os.path.join(self.root, f, *insidef)
753
753
754 def file(self, f):
754 def file(self, f):
755 if f[0] == '/':
755 if f[0] == '/':
756 f = f[1:]
756 f = f[1:]
757 return filelog.filelog(self.sopener, f)
757 return filelog.filelog(self.sopener, f)
758
758
759 def changectx(self, changeid):
759 def changectx(self, changeid):
760 return self[changeid]
760 return self[changeid]
761
761
762 def parents(self, changeid=None):
762 def parents(self, changeid=None):
763 '''get list of changectxs for parents of changeid'''
763 '''get list of changectxs for parents of changeid'''
764 return self[changeid].parents()
764 return self[changeid].parents()
765
765
766 def setparents(self, p1, p2=nullid):
766 def setparents(self, p1, p2=nullid):
767 self.dirstate.beginparentchange()
767 self.dirstate.beginparentchange()
768 copies = self.dirstate.setparents(p1, p2)
768 copies = self.dirstate.setparents(p1, p2)
769 pctx = self[p1]
769 pctx = self[p1]
770 if copies:
770 if copies:
771 # Adjust copy records, the dirstate cannot do it, it
771 # Adjust copy records, the dirstate cannot do it, it
772 # requires access to parents manifests. Preserve them
772 # requires access to parents manifests. Preserve them
773 # only for entries added to first parent.
773 # only for entries added to first parent.
774 for f in copies:
774 for f in copies:
775 if f not in pctx and copies[f] in pctx:
775 if f not in pctx and copies[f] in pctx:
776 self.dirstate.copy(copies[f], f)
776 self.dirstate.copy(copies[f], f)
777 if p2 == nullid:
777 if p2 == nullid:
778 for f, s in sorted(self.dirstate.copies().items()):
778 for f, s in sorted(self.dirstate.copies().items()):
779 if f not in pctx and s not in pctx:
779 if f not in pctx and s not in pctx:
780 self.dirstate.copy(None, f)
780 self.dirstate.copy(None, f)
781 self.dirstate.endparentchange()
781 self.dirstate.endparentchange()
782
782
783 def filectx(self, path, changeid=None, fileid=None):
783 def filectx(self, path, changeid=None, fileid=None):
784 """changeid can be a changeset revision, node, or tag.
784 """changeid can be a changeset revision, node, or tag.
785 fileid can be a file revision or node."""
785 fileid can be a file revision or node."""
786 return context.filectx(self, path, changeid, fileid)
786 return context.filectx(self, path, changeid, fileid)
787
787
788 def getcwd(self):
788 def getcwd(self):
789 return self.dirstate.getcwd()
789 return self.dirstate.getcwd()
790
790
791 def pathto(self, f, cwd=None):
791 def pathto(self, f, cwd=None):
792 return self.dirstate.pathto(f, cwd)
792 return self.dirstate.pathto(f, cwd)
793
793
794 def wfile(self, f, mode='r'):
794 def wfile(self, f, mode='r'):
795 return self.wopener(f, mode)
795 return self.wopener(f, mode)
796
796
797 def _link(self, f):
797 def _link(self, f):
798 return self.wvfs.islink(f)
798 return self.wvfs.islink(f)
799
799
800 def _loadfilter(self, filter):
800 def _loadfilter(self, filter):
801 if filter not in self.filterpats:
801 if filter not in self.filterpats:
802 l = []
802 l = []
803 for pat, cmd in self.ui.configitems(filter):
803 for pat, cmd in self.ui.configitems(filter):
804 if cmd == '!':
804 if cmd == '!':
805 continue
805 continue
806 mf = matchmod.match(self.root, '', [pat])
806 mf = matchmod.match(self.root, '', [pat])
807 fn = None
807 fn = None
808 params = cmd
808 params = cmd
809 for name, filterfn in self._datafilters.iteritems():
809 for name, filterfn in self._datafilters.iteritems():
810 if cmd.startswith(name):
810 if cmd.startswith(name):
811 fn = filterfn
811 fn = filterfn
812 params = cmd[len(name):].lstrip()
812 params = cmd[len(name):].lstrip()
813 break
813 break
814 if not fn:
814 if not fn:
815 fn = lambda s, c, **kwargs: util.filter(s, c)
815 fn = lambda s, c, **kwargs: util.filter(s, c)
816 # Wrap old filters not supporting keyword arguments
816 # Wrap old filters not supporting keyword arguments
817 if not inspect.getargspec(fn)[2]:
817 if not inspect.getargspec(fn)[2]:
818 oldfn = fn
818 oldfn = fn
819 fn = lambda s, c, **kwargs: oldfn(s, c)
819 fn = lambda s, c, **kwargs: oldfn(s, c)
820 l.append((mf, fn, params))
820 l.append((mf, fn, params))
821 self.filterpats[filter] = l
821 self.filterpats[filter] = l
822 return self.filterpats[filter]
822 return self.filterpats[filter]
823
823
824 def _filter(self, filterpats, filename, data):
824 def _filter(self, filterpats, filename, data):
825 for mf, fn, cmd in filterpats:
825 for mf, fn, cmd in filterpats:
826 if mf(filename):
826 if mf(filename):
827 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
827 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
828 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
828 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
829 break
829 break
830
830
831 return data
831 return data
832
832
833 @unfilteredpropertycache
833 @unfilteredpropertycache
834 def _encodefilterpats(self):
834 def _encodefilterpats(self):
835 return self._loadfilter('encode')
835 return self._loadfilter('encode')
836
836
837 @unfilteredpropertycache
837 @unfilteredpropertycache
838 def _decodefilterpats(self):
838 def _decodefilterpats(self):
839 return self._loadfilter('decode')
839 return self._loadfilter('decode')
840
840
841 def adddatafilter(self, name, filter):
841 def adddatafilter(self, name, filter):
842 self._datafilters[name] = filter
842 self._datafilters[name] = filter
843
843
844 def wread(self, filename):
844 def wread(self, filename):
845 if self._link(filename):
845 if self._link(filename):
846 data = self.wvfs.readlink(filename)
846 data = self.wvfs.readlink(filename)
847 else:
847 else:
848 data = self.wopener.read(filename)
848 data = self.wopener.read(filename)
849 return self._filter(self._encodefilterpats, filename, data)
849 return self._filter(self._encodefilterpats, filename, data)
850
850
851 def wwrite(self, filename, data, flags):
851 def wwrite(self, filename, data, flags):
852 data = self._filter(self._decodefilterpats, filename, data)
852 data = self._filter(self._decodefilterpats, filename, data)
853 if 'l' in flags:
853 if 'l' in flags:
854 self.wopener.symlink(data, filename)
854 self.wopener.symlink(data, filename)
855 else:
855 else:
856 self.wopener.write(filename, data)
856 self.wopener.write(filename, data)
857 if 'x' in flags:
857 if 'x' in flags:
858 self.wvfs.setflags(filename, False, True)
858 self.wvfs.setflags(filename, False, True)
859
859
860 def wwritedata(self, filename, data):
860 def wwritedata(self, filename, data):
861 return self._filter(self._decodefilterpats, filename, data)
861 return self._filter(self._decodefilterpats, filename, data)
862
862
863 def transaction(self, desc, report=None):
863 def transaction(self, desc, report=None):
864 tr = self._transref and self._transref() or None
864 tr = self._transref and self._transref() or None
865 if tr and tr.running():
865 if tr and tr.running():
866 return tr.nest()
866 return tr.nest()
867
867
868 # abort here if the journal already exists
868 # abort here if the journal already exists
869 if self.svfs.exists("journal"):
869 if self.svfs.exists("journal"):
870 raise error.RepoError(
870 raise error.RepoError(
871 _("abandoned transaction found"),
871 _("abandoned transaction found"),
872 hint=_("run 'hg recover' to clean up transaction"))
872 hint=_("run 'hg recover' to clean up transaction"))
873
873
874 def onclose():
874 def onclose():
875 self.store.write(self._transref())
875 self.store.write(self._transref())
876
876
877 self._writejournal(desc)
877 self._writejournal(desc)
878 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
878 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
879 rp = report and report or self.ui.warn
879 rp = report and report or self.ui.warn
880 tr = transaction.transaction(rp, self.sopener,
880 tr = transaction.transaction(rp, self.sopener,
881 "journal",
881 "journal",
882 aftertrans(renames),
882 aftertrans(renames),
883 self.store.createmode,
883 self.store.createmode,
884 onclose)
884 onclose)
885 self._transref = weakref.ref(tr)
885 self._transref = weakref.ref(tr)
886 return tr
886 return tr
887
887
888 def _journalfiles(self):
888 def _journalfiles(self):
889 return ((self.svfs, 'journal'),
889 return ((self.svfs, 'journal'),
890 (self.vfs, 'journal.dirstate'),
890 (self.vfs, 'journal.dirstate'),
891 (self.vfs, 'journal.branch'),
891 (self.vfs, 'journal.branch'),
892 (self.vfs, 'journal.desc'),
892 (self.vfs, 'journal.desc'),
893 (self.vfs, 'journal.bookmarks'),
893 (self.vfs, 'journal.bookmarks'),
894 (self.svfs, 'journal.phaseroots'))
894 (self.svfs, 'journal.phaseroots'))
895
895
896 def undofiles(self):
896 def undofiles(self):
897 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
897 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
898
898
899 def _writejournal(self, desc):
899 def _writejournal(self, desc):
900 self.opener.write("journal.dirstate",
900 self.opener.write("journal.dirstate",
901 self.opener.tryread("dirstate"))
901 self.opener.tryread("dirstate"))
902 self.opener.write("journal.branch",
902 self.opener.write("journal.branch",
903 encoding.fromlocal(self.dirstate.branch()))
903 encoding.fromlocal(self.dirstate.branch()))
904 self.opener.write("journal.desc",
904 self.opener.write("journal.desc",
905 "%d\n%s\n" % (len(self), desc))
905 "%d\n%s\n" % (len(self), desc))
906 self.opener.write("journal.bookmarks",
906 self.opener.write("journal.bookmarks",
907 self.opener.tryread("bookmarks"))
907 self.opener.tryread("bookmarks"))
908 self.sopener.write("journal.phaseroots",
908 self.sopener.write("journal.phaseroots",
909 self.sopener.tryread("phaseroots"))
909 self.sopener.tryread("phaseroots"))
910
910
911 def recover(self):
911 def recover(self):
912 lock = self.lock()
912 lock = self.lock()
913 try:
913 try:
914 if self.svfs.exists("journal"):
914 if self.svfs.exists("journal"):
915 self.ui.status(_("rolling back interrupted transaction\n"))
915 self.ui.status(_("rolling back interrupted transaction\n"))
916 transaction.rollback(self.sopener, "journal",
916 transaction.rollback(self.sopener, "journal",
917 self.ui.warn)
917 self.ui.warn)
918 self.invalidate()
918 self.invalidate()
919 return True
919 return True
920 else:
920 else:
921 self.ui.warn(_("no interrupted transaction available\n"))
921 self.ui.warn(_("no interrupted transaction available\n"))
922 return False
922 return False
923 finally:
923 finally:
924 lock.release()
924 lock.release()
925
925
926 def rollback(self, dryrun=False, force=False):
926 def rollback(self, dryrun=False, force=False):
927 wlock = lock = None
927 wlock = lock = None
928 try:
928 try:
929 wlock = self.wlock()
929 wlock = self.wlock()
930 lock = self.lock()
930 lock = self.lock()
931 if self.svfs.exists("undo"):
931 if self.svfs.exists("undo"):
932 return self._rollback(dryrun, force)
932 return self._rollback(dryrun, force)
933 else:
933 else:
934 self.ui.warn(_("no rollback information available\n"))
934 self.ui.warn(_("no rollback information available\n"))
935 return 1
935 return 1
936 finally:
936 finally:
937 release(lock, wlock)
937 release(lock, wlock)
938
938
939 @unfilteredmethod # Until we get smarter cache management
939 @unfilteredmethod # Until we get smarter cache management
940 def _rollback(self, dryrun, force):
940 def _rollback(self, dryrun, force):
941 ui = self.ui
941 ui = self.ui
942 try:
942 try:
943 args = self.opener.read('undo.desc').splitlines()
943 args = self.opener.read('undo.desc').splitlines()
944 (oldlen, desc, detail) = (int(args[0]), args[1], None)
944 (oldlen, desc, detail) = (int(args[0]), args[1], None)
945 if len(args) >= 3:
945 if len(args) >= 3:
946 detail = args[2]
946 detail = args[2]
947 oldtip = oldlen - 1
947 oldtip = oldlen - 1
948
948
949 if detail and ui.verbose:
949 if detail and ui.verbose:
950 msg = (_('repository tip rolled back to revision %s'
950 msg = (_('repository tip rolled back to revision %s'
951 ' (undo %s: %s)\n')
951 ' (undo %s: %s)\n')
952 % (oldtip, desc, detail))
952 % (oldtip, desc, detail))
953 else:
953 else:
954 msg = (_('repository tip rolled back to revision %s'
954 msg = (_('repository tip rolled back to revision %s'
955 ' (undo %s)\n')
955 ' (undo %s)\n')
956 % (oldtip, desc))
956 % (oldtip, desc))
957 except IOError:
957 except IOError:
958 msg = _('rolling back unknown transaction\n')
958 msg = _('rolling back unknown transaction\n')
959 desc = None
959 desc = None
960
960
961 if not force and self['.'] != self['tip'] and desc == 'commit':
961 if not force and self['.'] != self['tip'] and desc == 'commit':
962 raise util.Abort(
962 raise util.Abort(
963 _('rollback of last commit while not checked out '
963 _('rollback of last commit while not checked out '
964 'may lose data'), hint=_('use -f to force'))
964 'may lose data'), hint=_('use -f to force'))
965
965
966 ui.status(msg)
966 ui.status(msg)
967 if dryrun:
967 if dryrun:
968 return 0
968 return 0
969
969
970 parents = self.dirstate.parents()
970 parents = self.dirstate.parents()
971 self.destroying()
971 self.destroying()
972 transaction.rollback(self.sopener, 'undo', ui.warn)
972 transaction.rollback(self.sopener, 'undo', ui.warn)
973 if self.vfs.exists('undo.bookmarks'):
973 if self.vfs.exists('undo.bookmarks'):
974 self.vfs.rename('undo.bookmarks', 'bookmarks')
974 self.vfs.rename('undo.bookmarks', 'bookmarks')
975 if self.svfs.exists('undo.phaseroots'):
975 if self.svfs.exists('undo.phaseroots'):
976 self.svfs.rename('undo.phaseroots', 'phaseroots')
976 self.svfs.rename('undo.phaseroots', 'phaseroots')
977 self.invalidate()
977 self.invalidate()
978
978
979 parentgone = (parents[0] not in self.changelog.nodemap or
979 parentgone = (parents[0] not in self.changelog.nodemap or
980 parents[1] not in self.changelog.nodemap)
980 parents[1] not in self.changelog.nodemap)
981 if parentgone:
981 if parentgone:
982 self.vfs.rename('undo.dirstate', 'dirstate')
982 self.vfs.rename('undo.dirstate', 'dirstate')
983 try:
983 try:
984 branch = self.opener.read('undo.branch')
984 branch = self.opener.read('undo.branch')
985 self.dirstate.setbranch(encoding.tolocal(branch))
985 self.dirstate.setbranch(encoding.tolocal(branch))
986 except IOError:
986 except IOError:
987 ui.warn(_('named branch could not be reset: '
987 ui.warn(_('named branch could not be reset: '
988 'current branch is still \'%s\'\n')
988 'current branch is still \'%s\'\n')
989 % self.dirstate.branch())
989 % self.dirstate.branch())
990
990
991 self.dirstate.invalidate()
991 self.dirstate.invalidate()
992 parents = tuple([p.rev() for p in self.parents()])
992 parents = tuple([p.rev() for p in self.parents()])
993 if len(parents) > 1:
993 if len(parents) > 1:
994 ui.status(_('working directory now based on '
994 ui.status(_('working directory now based on '
995 'revisions %d and %d\n') % parents)
995 'revisions %d and %d\n') % parents)
996 else:
996 else:
997 ui.status(_('working directory now based on '
997 ui.status(_('working directory now based on '
998 'revision %d\n') % parents)
998 'revision %d\n') % parents)
999 # TODO: if we know which new heads may result from this rollback, pass
999 # TODO: if we know which new heads may result from this rollback, pass
1000 # them to destroy(), which will prevent the branchhead cache from being
1000 # them to destroy(), which will prevent the branchhead cache from being
1001 # invalidated.
1001 # invalidated.
1002 self.destroyed()
1002 self.destroyed()
1003 return 0
1003 return 0
1004
1004
1005 def invalidatecaches(self):
1005 def invalidatecaches(self):
1006
1006
1007 if '_tagscache' in vars(self):
1007 if '_tagscache' in vars(self):
1008 # can't use delattr on proxy
1008 # can't use delattr on proxy
1009 del self.__dict__['_tagscache']
1009 del self.__dict__['_tagscache']
1010
1010
1011 self.unfiltered()._branchcaches.clear()
1011 self.unfiltered()._branchcaches.clear()
1012 self.invalidatevolatilesets()
1012 self.invalidatevolatilesets()
1013
1013
1014 def invalidatevolatilesets(self):
1014 def invalidatevolatilesets(self):
1015 self.filteredrevcache.clear()
1015 self.filteredrevcache.clear()
1016 obsolete.clearobscaches(self)
1016 obsolete.clearobscaches(self)
1017
1017
1018 def invalidatedirstate(self):
1018 def invalidatedirstate(self):
1019 '''Invalidates the dirstate, causing the next call to dirstate
1019 '''Invalidates the dirstate, causing the next call to dirstate
1020 to check if it was modified since the last time it was read,
1020 to check if it was modified since the last time it was read,
1021 rereading it if it has.
1021 rereading it if it has.
1022
1022
1023 This is different to dirstate.invalidate() that it doesn't always
1023 This is different to dirstate.invalidate() that it doesn't always
1024 rereads the dirstate. Use dirstate.invalidate() if you want to
1024 rereads the dirstate. Use dirstate.invalidate() if you want to
1025 explicitly read the dirstate again (i.e. restoring it to a previous
1025 explicitly read the dirstate again (i.e. restoring it to a previous
1026 known good state).'''
1026 known good state).'''
1027 if hasunfilteredcache(self, 'dirstate'):
1027 if hasunfilteredcache(self, 'dirstate'):
1028 for k in self.dirstate._filecache:
1028 for k in self.dirstate._filecache:
1029 try:
1029 try:
1030 delattr(self.dirstate, k)
1030 delattr(self.dirstate, k)
1031 except AttributeError:
1031 except AttributeError:
1032 pass
1032 pass
1033 delattr(self.unfiltered(), 'dirstate')
1033 delattr(self.unfiltered(), 'dirstate')
1034
1034
1035 def invalidate(self):
1035 def invalidate(self):
1036 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1036 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1037 for k in self._filecache:
1037 for k in self._filecache:
1038 # dirstate is invalidated separately in invalidatedirstate()
1038 # dirstate is invalidated separately in invalidatedirstate()
1039 if k == 'dirstate':
1039 if k == 'dirstate':
1040 continue
1040 continue
1041
1041
1042 try:
1042 try:
1043 delattr(unfiltered, k)
1043 delattr(unfiltered, k)
1044 except AttributeError:
1044 except AttributeError:
1045 pass
1045 pass
1046 self.invalidatecaches()
1046 self.invalidatecaches()
1047 self.store.invalidatecaches()
1047 self.store.invalidatecaches()
1048
1048
1049 def invalidateall(self):
1049 def invalidateall(self):
1050 '''Fully invalidates both store and non-store parts, causing the
1050 '''Fully invalidates both store and non-store parts, causing the
1051 subsequent operation to reread any outside changes.'''
1051 subsequent operation to reread any outside changes.'''
1052 # extension should hook this to invalidate its caches
1052 # extension should hook this to invalidate its caches
1053 self.invalidate()
1053 self.invalidate()
1054 self.invalidatedirstate()
1054 self.invalidatedirstate()
1055
1055
1056 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1056 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1057 try:
1057 try:
1058 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1058 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1059 except error.LockHeld, inst:
1059 except error.LockHeld, inst:
1060 if not wait:
1060 if not wait:
1061 raise
1061 raise
1062 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1062 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1063 (desc, inst.locker))
1063 (desc, inst.locker))
1064 # default to 600 seconds timeout
1064 # default to 600 seconds timeout
1065 l = lockmod.lock(vfs, lockname,
1065 l = lockmod.lock(vfs, lockname,
1066 int(self.ui.config("ui", "timeout", "600")),
1066 int(self.ui.config("ui", "timeout", "600")),
1067 releasefn, desc=desc)
1067 releasefn, desc=desc)
1068 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1068 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1069 if acquirefn:
1069 if acquirefn:
1070 acquirefn()
1070 acquirefn()
1071 return l
1071 return l
1072
1072
1073 def _afterlock(self, callback):
1073 def _afterlock(self, callback):
1074 """add a callback to the current repository lock.
1074 """add a callback to the current repository lock.
1075
1075
1076 The callback will be executed on lock release."""
1076 The callback will be executed on lock release."""
1077 l = self._lockref and self._lockref()
1077 l = self._lockref and self._lockref()
1078 if l:
1078 if l:
1079 l.postrelease.append(callback)
1079 l.postrelease.append(callback)
1080 else:
1080 else:
1081 callback()
1081 callback()
1082
1082
1083 def lock(self, wait=True):
1083 def lock(self, wait=True):
1084 '''Lock the repository store (.hg/store) and return a weak reference
1084 '''Lock the repository store (.hg/store) and return a weak reference
1085 to the lock. Use this before modifying the store (e.g. committing or
1085 to the lock. Use this before modifying the store (e.g. committing or
1086 stripping). If you are opening a transaction, get a lock as well.)'''
1086 stripping). If you are opening a transaction, get a lock as well.)'''
1087 l = self._lockref and self._lockref()
1087 l = self._lockref and self._lockref()
1088 if l is not None and l.held:
1088 if l is not None and l.held:
1089 l.lock()
1089 l.lock()
1090 return l
1090 return l
1091
1091
1092 def unlock():
1092 def unlock():
1093 for k, ce in self._filecache.items():
1093 for k, ce in self._filecache.items():
1094 if k == 'dirstate' or k not in self.__dict__:
1094 if k == 'dirstate' or k not in self.__dict__:
1095 continue
1095 continue
1096 ce.refresh()
1096 ce.refresh()
1097
1097
1098 l = self._lock(self.svfs, "lock", wait, unlock,
1098 l = self._lock(self.svfs, "lock", wait, unlock,
1099 self.invalidate, _('repository %s') % self.origroot)
1099 self.invalidate, _('repository %s') % self.origroot)
1100 self._lockref = weakref.ref(l)
1100 self._lockref = weakref.ref(l)
1101 return l
1101 return l
1102
1102
1103 def wlock(self, wait=True):
1103 def wlock(self, wait=True):
1104 '''Lock the non-store parts of the repository (everything under
1104 '''Lock the non-store parts of the repository (everything under
1105 .hg except .hg/store) and return a weak reference to the lock.
1105 .hg except .hg/store) and return a weak reference to the lock.
1106 Use this before modifying files in .hg.'''
1106 Use this before modifying files in .hg.'''
1107 l = self._wlockref and self._wlockref()
1107 l = self._wlockref and self._wlockref()
1108 if l is not None and l.held:
1108 if l is not None and l.held:
1109 l.lock()
1109 l.lock()
1110 return l
1110 return l
1111
1111
1112 def unlock():
1112 def unlock():
1113 if self.dirstate.pendingparentchange():
1113 if self.dirstate.pendingparentchange():
1114 self.dirstate.invalidate()
1114 self.dirstate.invalidate()
1115 else:
1115 else:
1116 self.dirstate.write()
1116 self.dirstate.write()
1117
1117
1118 self._filecache['dirstate'].refresh()
1118 self._filecache['dirstate'].refresh()
1119
1119
1120 l = self._lock(self.vfs, "wlock", wait, unlock,
1120 l = self._lock(self.vfs, "wlock", wait, unlock,
1121 self.invalidatedirstate, _('working directory of %s') %
1121 self.invalidatedirstate, _('working directory of %s') %
1122 self.origroot)
1122 self.origroot)
1123 self._wlockref = weakref.ref(l)
1123 self._wlockref = weakref.ref(l)
1124 return l
1124 return l
1125
1125
1126 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1126 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1127 """
1127 """
1128 commit an individual file as part of a larger transaction
1128 commit an individual file as part of a larger transaction
1129 """
1129 """
1130
1130
1131 fname = fctx.path()
1131 fname = fctx.path()
1132 text = fctx.data()
1132 text = fctx.data()
1133 flog = self.file(fname)
1133 flog = self.file(fname)
1134 fparent1 = manifest1.get(fname, nullid)
1134 fparent1 = manifest1.get(fname, nullid)
1135 fparent2 = manifest2.get(fname, nullid)
1135 fparent2 = manifest2.get(fname, nullid)
1136
1136
1137 meta = {}
1137 meta = {}
1138 copy = fctx.renamed()
1138 copy = fctx.renamed()
1139 if copy and copy[0] != fname:
1139 if copy and copy[0] != fname:
1140 # Mark the new revision of this file as a copy of another
1140 # Mark the new revision of this file as a copy of another
1141 # file. This copy data will effectively act as a parent
1141 # file. This copy data will effectively act as a parent
1142 # of this new revision. If this is a merge, the first
1142 # of this new revision. If this is a merge, the first
1143 # parent will be the nullid (meaning "look up the copy data")
1143 # parent will be the nullid (meaning "look up the copy data")
1144 # and the second one will be the other parent. For example:
1144 # and the second one will be the other parent. For example:
1145 #
1145 #
1146 # 0 --- 1 --- 3 rev1 changes file foo
1146 # 0 --- 1 --- 3 rev1 changes file foo
1147 # \ / rev2 renames foo to bar and changes it
1147 # \ / rev2 renames foo to bar and changes it
1148 # \- 2 -/ rev3 should have bar with all changes and
1148 # \- 2 -/ rev3 should have bar with all changes and
1149 # should record that bar descends from
1149 # should record that bar descends from
1150 # bar in rev2 and foo in rev1
1150 # bar in rev2 and foo in rev1
1151 #
1151 #
1152 # this allows this merge to succeed:
1152 # this allows this merge to succeed:
1153 #
1153 #
1154 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1154 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1155 # \ / merging rev3 and rev4 should use bar@rev2
1155 # \ / merging rev3 and rev4 should use bar@rev2
1156 # \- 2 --- 4 as the merge base
1156 # \- 2 --- 4 as the merge base
1157 #
1157 #
1158
1158
1159 cfname = copy[0]
1159 cfname = copy[0]
1160 crev = manifest1.get(cfname)
1160 crev = manifest1.get(cfname)
1161 newfparent = fparent2
1161 newfparent = fparent2
1162
1162
1163 if manifest2: # branch merge
1163 if manifest2: # branch merge
1164 if fparent2 == nullid or crev is None: # copied on remote side
1164 if fparent2 == nullid or crev is None: # copied on remote side
1165 if cfname in manifest2:
1165 if cfname in manifest2:
1166 crev = manifest2[cfname]
1166 crev = manifest2[cfname]
1167 newfparent = fparent1
1167 newfparent = fparent1
1168
1168
1169 # find source in nearest ancestor if we've lost track
1169 # find source in nearest ancestor if we've lost track
1170 if not crev:
1170 if not crev:
1171 self.ui.debug(" %s: searching for copy revision for %s\n" %
1171 self.ui.debug(" %s: searching for copy revision for %s\n" %
1172 (fname, cfname))
1172 (fname, cfname))
1173 for ancestor in self[None].ancestors():
1173 for ancestor in self[None].ancestors():
1174 if cfname in ancestor:
1174 if cfname in ancestor:
1175 crev = ancestor[cfname].filenode()
1175 crev = ancestor[cfname].filenode()
1176 break
1176 break
1177
1177
1178 if crev:
1178 if crev:
1179 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1179 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1180 meta["copy"] = cfname
1180 meta["copy"] = cfname
1181 meta["copyrev"] = hex(crev)
1181 meta["copyrev"] = hex(crev)
1182 fparent1, fparent2 = nullid, newfparent
1182 fparent1, fparent2 = nullid, newfparent
1183 else:
1183 else:
1184 self.ui.warn(_("warning: can't find ancestor for '%s' "
1184 self.ui.warn(_("warning: can't find ancestor for '%s' "
1185 "copied from '%s'!\n") % (fname, cfname))
1185 "copied from '%s'!\n") % (fname, cfname))
1186
1186
1187 elif fparent1 == nullid:
1187 elif fparent1 == nullid:
1188 fparent1, fparent2 = fparent2, nullid
1188 fparent1, fparent2 = fparent2, nullid
1189 elif fparent2 != nullid:
1189 elif fparent2 != nullid:
1190 # is one parent an ancestor of the other?
1190 # is one parent an ancestor of the other?
1191 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1191 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1192 if fparent1 in fparentancestors:
1192 if fparent1 in fparentancestors:
1193 fparent1, fparent2 = fparent2, nullid
1193 fparent1, fparent2 = fparent2, nullid
1194 elif fparent2 in fparentancestors:
1194 elif fparent2 in fparentancestors:
1195 fparent2 = nullid
1195 fparent2 = nullid
1196
1196
1197 # is the file changed?
1197 # is the file changed?
1198 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1198 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1199 changelist.append(fname)
1199 changelist.append(fname)
1200 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1200 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1201 # are just the flags changed during merge?
1201 # are just the flags changed during merge?
1202 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1202 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1203 changelist.append(fname)
1203 changelist.append(fname)
1204
1204
1205 return fparent1
1205 return fparent1
1206
1206
1207 @unfilteredmethod
1207 @unfilteredmethod
1208 def commit(self, text="", user=None, date=None, match=None, force=False,
1208 def commit(self, text="", user=None, date=None, match=None, force=False,
1209 editor=False, extra={}):
1209 editor=False, extra={}):
1210 """Add a new revision to current repository.
1210 """Add a new revision to current repository.
1211
1211
1212 Revision information is gathered from the working directory,
1212 Revision information is gathered from the working directory,
1213 match can be used to filter the committed files. If editor is
1213 match can be used to filter the committed files. If editor is
1214 supplied, it is called to get a commit message.
1214 supplied, it is called to get a commit message.
1215 """
1215 """
1216
1216
1217 def fail(f, msg):
1217 def fail(f, msg):
1218 raise util.Abort('%s: %s' % (f, msg))
1218 raise util.Abort('%s: %s' % (f, msg))
1219
1219
1220 if not match:
1220 if not match:
1221 match = matchmod.always(self.root, '')
1221 match = matchmod.always(self.root, '')
1222
1222
1223 if not force:
1223 if not force:
1224 vdirs = []
1224 vdirs = []
1225 match.explicitdir = vdirs.append
1225 match.explicitdir = vdirs.append
1226 match.bad = fail
1226 match.bad = fail
1227
1227
1228 wlock = self.wlock()
1228 wlock = self.wlock()
1229 try:
1229 try:
1230 wctx = self[None]
1230 wctx = self[None]
1231 merge = len(wctx.parents()) > 1
1231 merge = len(wctx.parents()) > 1
1232
1232
1233 if (not force and merge and match and
1233 if (not force and merge and match and
1234 (match.files() or match.anypats())):
1234 (match.files() or match.anypats())):
1235 raise util.Abort(_('cannot partially commit a merge '
1235 raise util.Abort(_('cannot partially commit a merge '
1236 '(do not specify files or patterns)'))
1236 '(do not specify files or patterns)'))
1237
1237
1238 changes = self.status(match=match, clean=force)
1238 status = self.status(match=match, clean=force)
1239 if force:
1239 if force:
1240 changes[0].extend(changes[6]) # mq may commit unchanged files
1240 status.modified.extend(status.clean) # mq may commit clean files
1241
1241
1242 # check subrepos
1242 # check subrepos
1243 subs = []
1243 subs = []
1244 commitsubs = set()
1244 commitsubs = set()
1245 newstate = wctx.substate.copy()
1245 newstate = wctx.substate.copy()
1246 # only manage subrepos and .hgsubstate if .hgsub is present
1246 # only manage subrepos and .hgsubstate if .hgsub is present
1247 if '.hgsub' in wctx:
1247 if '.hgsub' in wctx:
1248 # we'll decide whether to track this ourselves, thanks
1248 # we'll decide whether to track this ourselves, thanks
1249 for c in changes[:3]:
1249 for c in status.modified, status.added, status.removed:
1250 if '.hgsubstate' in c:
1250 if '.hgsubstate' in c:
1251 c.remove('.hgsubstate')
1251 c.remove('.hgsubstate')
1252
1252
1253 # compare current state to last committed state
1253 # compare current state to last committed state
1254 # build new substate based on last committed state
1254 # build new substate based on last committed state
1255 oldstate = wctx.p1().substate
1255 oldstate = wctx.p1().substate
1256 for s in sorted(newstate.keys()):
1256 for s in sorted(newstate.keys()):
1257 if not match(s):
1257 if not match(s):
1258 # ignore working copy, use old state if present
1258 # ignore working copy, use old state if present
1259 if s in oldstate:
1259 if s in oldstate:
1260 newstate[s] = oldstate[s]
1260 newstate[s] = oldstate[s]
1261 continue
1261 continue
1262 if not force:
1262 if not force:
1263 raise util.Abort(
1263 raise util.Abort(
1264 _("commit with new subrepo %s excluded") % s)
1264 _("commit with new subrepo %s excluded") % s)
1265 if wctx.sub(s).dirty(True):
1265 if wctx.sub(s).dirty(True):
1266 if not self.ui.configbool('ui', 'commitsubrepos'):
1266 if not self.ui.configbool('ui', 'commitsubrepos'):
1267 raise util.Abort(
1267 raise util.Abort(
1268 _("uncommitted changes in subrepo %s") % s,
1268 _("uncommitted changes in subrepo %s") % s,
1269 hint=_("use --subrepos for recursive commit"))
1269 hint=_("use --subrepos for recursive commit"))
1270 subs.append(s)
1270 subs.append(s)
1271 commitsubs.add(s)
1271 commitsubs.add(s)
1272 else:
1272 else:
1273 bs = wctx.sub(s).basestate()
1273 bs = wctx.sub(s).basestate()
1274 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1274 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1275 if oldstate.get(s, (None, None, None))[1] != bs:
1275 if oldstate.get(s, (None, None, None))[1] != bs:
1276 subs.append(s)
1276 subs.append(s)
1277
1277
1278 # check for removed subrepos
1278 # check for removed subrepos
1279 for p in wctx.parents():
1279 for p in wctx.parents():
1280 r = [s for s in p.substate if s not in newstate]
1280 r = [s for s in p.substate if s not in newstate]
1281 subs += [s for s in r if match(s)]
1281 subs += [s for s in r if match(s)]
1282 if subs:
1282 if subs:
1283 if (not match('.hgsub') and
1283 if (not match('.hgsub') and
1284 '.hgsub' in (wctx.modified() + wctx.added())):
1284 '.hgsub' in (wctx.modified() + wctx.added())):
1285 raise util.Abort(
1285 raise util.Abort(
1286 _("can't commit subrepos without .hgsub"))
1286 _("can't commit subrepos without .hgsub"))
1287 changes[0].insert(0, '.hgsubstate')
1287 status.modified.insert(0, '.hgsubstate')
1288
1288
1289 elif '.hgsub' in changes[2]:
1289 elif '.hgsub' in status.removed:
1290 # clean up .hgsubstate when .hgsub is removed
1290 # clean up .hgsubstate when .hgsub is removed
1291 if ('.hgsubstate' in wctx and
1291 if ('.hgsubstate' in wctx and
1292 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1292 '.hgsubstate' not in (status.modified + status.added +
1293 changes[2].insert(0, '.hgsubstate')
1293 status.removed)):
1294 status.removed.insert(0, '.hgsubstate')
1294
1295
1295 # make sure all explicit patterns are matched
1296 # make sure all explicit patterns are matched
1296 if not force and match.files():
1297 if not force and match.files():
1297 matched = set(changes[0] + changes[1] + changes[2])
1298 matched = set(status.modified + status.added + status.removed)
1298
1299
1299 for f in match.files():
1300 for f in match.files():
1300 f = self.dirstate.normalize(f)
1301 f = self.dirstate.normalize(f)
1301 if f == '.' or f in matched or f in wctx.substate:
1302 if f == '.' or f in matched or f in wctx.substate:
1302 continue
1303 continue
1303 if f in changes[3]: # missing
1304 if f in status.deleted:
1304 fail(f, _('file not found!'))
1305 fail(f, _('file not found!'))
1305 if f in vdirs: # visited directory
1306 if f in vdirs: # visited directory
1306 d = f + '/'
1307 d = f + '/'
1307 for mf in matched:
1308 for mf in matched:
1308 if mf.startswith(d):
1309 if mf.startswith(d):
1309 break
1310 break
1310 else:
1311 else:
1311 fail(f, _("no match under directory!"))
1312 fail(f, _("no match under directory!"))
1312 elif f not in self.dirstate:
1313 elif f not in self.dirstate:
1313 fail(f, _("file not tracked!"))
1314 fail(f, _("file not tracked!"))
1314
1315
1315 cctx = context.workingctx(self, text, user, date, extra, changes)
1316 cctx = context.workingctx(self, text, user, date, extra, status)
1316
1317
1317 if (not force and not extra.get("close") and not merge
1318 if (not force and not extra.get("close") and not merge
1318 and not cctx.files()
1319 and not cctx.files()
1319 and wctx.branch() == wctx.p1().branch()):
1320 and wctx.branch() == wctx.p1().branch()):
1320 return None
1321 return None
1321
1322
1322 if merge and cctx.deleted():
1323 if merge and cctx.deleted():
1323 raise util.Abort(_("cannot commit merge with missing files"))
1324 raise util.Abort(_("cannot commit merge with missing files"))
1324
1325
1325 ms = mergemod.mergestate(self)
1326 ms = mergemod.mergestate(self)
1326 for f in changes[0]:
1327 for f in status.modified:
1327 if f in ms and ms[f] == 'u':
1328 if f in ms and ms[f] == 'u':
1328 raise util.Abort(_("unresolved merge conflicts "
1329 raise util.Abort(_("unresolved merge conflicts "
1329 "(see hg help resolve)"))
1330 "(see hg help resolve)"))
1330
1331
1331 if editor:
1332 if editor:
1332 cctx._text = editor(self, cctx, subs)
1333 cctx._text = editor(self, cctx, subs)
1333 edited = (text != cctx._text)
1334 edited = (text != cctx._text)
1334
1335
1335 # Save commit message in case this transaction gets rolled back
1336 # Save commit message in case this transaction gets rolled back
1336 # (e.g. by a pretxncommit hook). Leave the content alone on
1337 # (e.g. by a pretxncommit hook). Leave the content alone on
1337 # the assumption that the user will use the same editor again.
1338 # the assumption that the user will use the same editor again.
1338 msgfn = self.savecommitmessage(cctx._text)
1339 msgfn = self.savecommitmessage(cctx._text)
1339
1340
1340 # commit subs and write new state
1341 # commit subs and write new state
1341 if subs:
1342 if subs:
1342 for s in sorted(commitsubs):
1343 for s in sorted(commitsubs):
1343 sub = wctx.sub(s)
1344 sub = wctx.sub(s)
1344 self.ui.status(_('committing subrepository %s\n') %
1345 self.ui.status(_('committing subrepository %s\n') %
1345 subrepo.subrelpath(sub))
1346 subrepo.subrelpath(sub))
1346 sr = sub.commit(cctx._text, user, date)
1347 sr = sub.commit(cctx._text, user, date)
1347 newstate[s] = (newstate[s][0], sr)
1348 newstate[s] = (newstate[s][0], sr)
1348 subrepo.writestate(self, newstate)
1349 subrepo.writestate(self, newstate)
1349
1350
1350 p1, p2 = self.dirstate.parents()
1351 p1, p2 = self.dirstate.parents()
1351 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1352 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1352 try:
1353 try:
1353 self.hook("precommit", throw=True, parent1=hookp1,
1354 self.hook("precommit", throw=True, parent1=hookp1,
1354 parent2=hookp2)
1355 parent2=hookp2)
1355 ret = self.commitctx(cctx, True)
1356 ret = self.commitctx(cctx, True)
1356 except: # re-raises
1357 except: # re-raises
1357 if edited:
1358 if edited:
1358 self.ui.write(
1359 self.ui.write(
1359 _('note: commit message saved in %s\n') % msgfn)
1360 _('note: commit message saved in %s\n') % msgfn)
1360 raise
1361 raise
1361
1362
1362 # update bookmarks, dirstate and mergestate
1363 # update bookmarks, dirstate and mergestate
1363 bookmarks.update(self, [p1, p2], ret)
1364 bookmarks.update(self, [p1, p2], ret)
1364 cctx.markcommitted(ret)
1365 cctx.markcommitted(ret)
1365 ms.reset()
1366 ms.reset()
1366 finally:
1367 finally:
1367 wlock.release()
1368 wlock.release()
1368
1369
1369 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1370 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1370 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1371 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1371 self._afterlock(commithook)
1372 self._afterlock(commithook)
1372 return ret
1373 return ret
1373
1374
1374 @unfilteredmethod
1375 @unfilteredmethod
1375 def commitctx(self, ctx, error=False):
1376 def commitctx(self, ctx, error=False):
1376 """Add a new revision to current repository.
1377 """Add a new revision to current repository.
1377 Revision information is passed via the context argument.
1378 Revision information is passed via the context argument.
1378 """
1379 """
1379
1380
1380 tr = None
1381 tr = None
1381 p1, p2 = ctx.p1(), ctx.p2()
1382 p1, p2 = ctx.p1(), ctx.p2()
1382 user = ctx.user()
1383 user = ctx.user()
1383
1384
1384 lock = self.lock()
1385 lock = self.lock()
1385 try:
1386 try:
1386 tr = self.transaction("commit")
1387 tr = self.transaction("commit")
1387 trp = weakref.proxy(tr)
1388 trp = weakref.proxy(tr)
1388
1389
1389 if ctx.files():
1390 if ctx.files():
1390 m1 = p1.manifest()
1391 m1 = p1.manifest()
1391 m2 = p2.manifest()
1392 m2 = p2.manifest()
1392 m = m1.copy()
1393 m = m1.copy()
1393
1394
1394 # check in files
1395 # check in files
1395 added = []
1396 added = []
1396 changed = []
1397 changed = []
1397 removed = list(ctx.removed())
1398 removed = list(ctx.removed())
1398 linkrev = len(self)
1399 linkrev = len(self)
1399 for f in sorted(ctx.modified() + ctx.added()):
1400 for f in sorted(ctx.modified() + ctx.added()):
1400 self.ui.note(f + "\n")
1401 self.ui.note(f + "\n")
1401 try:
1402 try:
1402 fctx = ctx[f]
1403 fctx = ctx[f]
1403 if fctx is None:
1404 if fctx is None:
1404 removed.append(f)
1405 removed.append(f)
1405 else:
1406 else:
1406 added.append(f)
1407 added.append(f)
1407 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1408 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1408 trp, changed)
1409 trp, changed)
1409 m.set(f, fctx.flags())
1410 m.set(f, fctx.flags())
1410 except OSError, inst:
1411 except OSError, inst:
1411 self.ui.warn(_("trouble committing %s!\n") % f)
1412 self.ui.warn(_("trouble committing %s!\n") % f)
1412 raise
1413 raise
1413 except IOError, inst:
1414 except IOError, inst:
1414 errcode = getattr(inst, 'errno', errno.ENOENT)
1415 errcode = getattr(inst, 'errno', errno.ENOENT)
1415 if error or errcode and errcode != errno.ENOENT:
1416 if error or errcode and errcode != errno.ENOENT:
1416 self.ui.warn(_("trouble committing %s!\n") % f)
1417 self.ui.warn(_("trouble committing %s!\n") % f)
1417 raise
1418 raise
1418
1419
1419 # update manifest
1420 # update manifest
1420 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1421 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1421 drop = [f for f in removed if f in m]
1422 drop = [f for f in removed if f in m]
1422 for f in drop:
1423 for f in drop:
1423 del m[f]
1424 del m[f]
1424 mn = self.manifest.add(m, trp, linkrev,
1425 mn = self.manifest.add(m, trp, linkrev,
1425 p1.manifestnode(), p2.manifestnode(),
1426 p1.manifestnode(), p2.manifestnode(),
1426 added, drop)
1427 added, drop)
1427 files = changed + removed
1428 files = changed + removed
1428 else:
1429 else:
1429 mn = p1.manifestnode()
1430 mn = p1.manifestnode()
1430 files = []
1431 files = []
1431
1432
1432 # update changelog
1433 # update changelog
1433 self.changelog.delayupdate()
1434 self.changelog.delayupdate()
1434 n = self.changelog.add(mn, files, ctx.description(),
1435 n = self.changelog.add(mn, files, ctx.description(),
1435 trp, p1.node(), p2.node(),
1436 trp, p1.node(), p2.node(),
1436 user, ctx.date(), ctx.extra().copy())
1437 user, ctx.date(), ctx.extra().copy())
1437 p = lambda: self.changelog.writepending() and self.root or ""
1438 p = lambda: self.changelog.writepending() and self.root or ""
1438 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1439 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1439 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1440 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1440 parent2=xp2, pending=p)
1441 parent2=xp2, pending=p)
1441 self.changelog.finalize(trp)
1442 self.changelog.finalize(trp)
1442 # set the new commit is proper phase
1443 # set the new commit is proper phase
1443 targetphase = subrepo.newcommitphase(self.ui, ctx)
1444 targetphase = subrepo.newcommitphase(self.ui, ctx)
1444 if targetphase:
1445 if targetphase:
1445 # retract boundary do not alter parent changeset.
1446 # retract boundary do not alter parent changeset.
1446 # if a parent have higher the resulting phase will
1447 # if a parent have higher the resulting phase will
1447 # be compliant anyway
1448 # be compliant anyway
1448 #
1449 #
1449 # if minimal phase was 0 we don't need to retract anything
1450 # if minimal phase was 0 we don't need to retract anything
1450 phases.retractboundary(self, tr, targetphase, [n])
1451 phases.retractboundary(self, tr, targetphase, [n])
1451 tr.close()
1452 tr.close()
1452 branchmap.updatecache(self.filtered('served'))
1453 branchmap.updatecache(self.filtered('served'))
1453 return n
1454 return n
1454 finally:
1455 finally:
1455 if tr:
1456 if tr:
1456 tr.release()
1457 tr.release()
1457 lock.release()
1458 lock.release()
1458
1459
1459 @unfilteredmethod
1460 @unfilteredmethod
1460 def destroying(self):
1461 def destroying(self):
1461 '''Inform the repository that nodes are about to be destroyed.
1462 '''Inform the repository that nodes are about to be destroyed.
1462 Intended for use by strip and rollback, so there's a common
1463 Intended for use by strip and rollback, so there's a common
1463 place for anything that has to be done before destroying history.
1464 place for anything that has to be done before destroying history.
1464
1465
1465 This is mostly useful for saving state that is in memory and waiting
1466 This is mostly useful for saving state that is in memory and waiting
1466 to be flushed when the current lock is released. Because a call to
1467 to be flushed when the current lock is released. Because a call to
1467 destroyed is imminent, the repo will be invalidated causing those
1468 destroyed is imminent, the repo will be invalidated causing those
1468 changes to stay in memory (waiting for the next unlock), or vanish
1469 changes to stay in memory (waiting for the next unlock), or vanish
1469 completely.
1470 completely.
1470 '''
1471 '''
1471 # When using the same lock to commit and strip, the phasecache is left
1472 # When using the same lock to commit and strip, the phasecache is left
1472 # dirty after committing. Then when we strip, the repo is invalidated,
1473 # dirty after committing. Then when we strip, the repo is invalidated,
1473 # causing those changes to disappear.
1474 # causing those changes to disappear.
1474 if '_phasecache' in vars(self):
1475 if '_phasecache' in vars(self):
1475 self._phasecache.write()
1476 self._phasecache.write()
1476
1477
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # NOTE(review): invalidate() appears to drop cached repo state so
        # subsequent reads reflect the stripped history — confirm.
        self.invalidate()
1510
1511
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        With node=None the working directory context is walked;
        otherwise the files of changeset ``node`` are walked.
        '''
        return self[node].walk(match)
1518
1519
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)

        All flag arguments are forwarded unchanged to the context's
        status(); the returned status object's fields should be accessed
        by name (modified, added, removed, ...), not by index.
        '''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
1525
1526
1526 def heads(self, start=None):
1527 def heads(self, start=None):
1527 heads = self.changelog.heads(start)
1528 heads = self.changelog.heads(start)
1528 # sort the output in rev descending order
1529 # sort the output in rev descending order
1529 return sorted(heads, key=self.changelog.rev, reverse=True)
1530 return sorted(heads, key=self.changelog.rev, reverse=True)
1530
1531
1531 def branchheads(self, branch=None, start=None, closed=False):
1532 def branchheads(self, branch=None, start=None, closed=False):
1532 '''return a (possibly filtered) list of heads for the given branch
1533 '''return a (possibly filtered) list of heads for the given branch
1533
1534
1534 Heads are returned in topological order, from newest to oldest.
1535 Heads are returned in topological order, from newest to oldest.
1535 If branch is None, use the dirstate branch.
1536 If branch is None, use the dirstate branch.
1536 If start is not None, return only heads reachable from start.
1537 If start is not None, return only heads reachable from start.
1537 If closed is True, return heads that are marked as closed as well.
1538 If closed is True, return heads that are marked as closed as well.
1538 '''
1539 '''
1539 if branch is None:
1540 if branch is None:
1540 branch = self[None].branch()
1541 branch = self[None].branch()
1541 branches = self.branchmap()
1542 branches = self.branchmap()
1542 if branch not in branches:
1543 if branch not in branches:
1543 return []
1544 return []
1544 # the cache returns heads ordered lowest to highest
1545 # the cache returns heads ordered lowest to highest
1545 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1546 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1546 if start is not None:
1547 if start is not None:
1547 # filter out the heads that cannot be reached from startrev
1548 # filter out the heads that cannot be reached from startrev
1548 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1549 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1549 bheads = [h for h in bheads if h in fbheads]
1550 bheads = [h for h in bheads if h in fbheads]
1550 return bheads
1551 return bheads
1551
1552
1552 def branches(self, nodes):
1553 def branches(self, nodes):
1553 if not nodes:
1554 if not nodes:
1554 nodes = [self.changelog.tip()]
1555 nodes = [self.changelog.tip()]
1555 b = []
1556 b = []
1556 for n in nodes:
1557 for n in nodes:
1557 t = n
1558 t = n
1558 while True:
1559 while True:
1559 p = self.changelog.parents(n)
1560 p = self.changelog.parents(n)
1560 if p[1] != nullid or p[0] == nullid:
1561 if p[1] != nullid or p[0] == nullid:
1561 b.append((t, n, p[0], p[1]))
1562 b.append((t, n, p[0], p[1]))
1562 break
1563 break
1563 n = p[0]
1564 n = p[0]
1564 return b
1565 return b
1565
1566
1566 def between(self, pairs):
1567 def between(self, pairs):
1567 r = []
1568 r = []
1568
1569
1569 for top, bottom in pairs:
1570 for top, bottom in pairs:
1570 n, l, i = top, [], 0
1571 n, l, i = top, [], 0
1571 f = 1
1572 f = 1
1572
1573
1573 while n != bottom and n != nullid:
1574 while n != bottom and n != nullid:
1574 p = self.changelog.parents(n)[0]
1575 p = self.changelog.parents(n)[0]
1575 if i == f:
1576 if i == f:
1576 l.append(n)
1577 l.append(n)
1577 f = f * 2
1578 f = f * 2
1578 n = p
1579 n = p
1579 i += 1
1580 i += 1
1580
1581
1581 r.append(l)
1582 r.append(l)
1582
1583
1583 return r
1584 return r
1584
1585
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        ``pushop`` describes the push in progress.  The default
        implementation performs no checks.
        """
        pass
1591
1592
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.

        The hooks container is created lazily and cached on the
        unfiltered repo by unfilteredpropertycache.
        """
        return util.hooks()
1598
1599
    def stream_in(self, remote, requirements):
        '''Clone by copying raw file data streamed from *remote*.

        ``requirements`` is the set of format requirements to adopt from
        the remote; our existing non-format requirements are merged in
        before being written out.  Returns len(self.heads()) + 1.

        Raises util.Abort on a server-reported error code and
        error.ResponseError on a malformed stream header.
        '''
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line of the stream is a numeric status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total byte size>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    # per-file header: "<name>\0<size>"
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero in the rate computation below
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            # new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                  for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1701
1702
1702 def clone(self, remote, heads=[], stream=False):
1703 def clone(self, remote, heads=[], stream=False):
1703 '''clone remote repository.
1704 '''clone remote repository.
1704
1705
1705 keyword arguments:
1706 keyword arguments:
1706 heads: list of revs to clone (forces use of pull)
1707 heads: list of revs to clone (forces use of pull)
1707 stream: use streaming clone if possible'''
1708 stream: use streaming clone if possible'''
1708
1709
1709 # now, all clients that can request uncompressed clones can
1710 # now, all clients that can request uncompressed clones can
1710 # read repo formats supported by all servers that can serve
1711 # read repo formats supported by all servers that can serve
1711 # them.
1712 # them.
1712
1713
1713 # if revlog format changes, client will have to check version
1714 # if revlog format changes, client will have to check version
1714 # and format flags on "stream" capability, and use
1715 # and format flags on "stream" capability, and use
1715 # uncompressed only if compatible.
1716 # uncompressed only if compatible.
1716
1717
1717 if not stream:
1718 if not stream:
1718 # if the server explicitly prefers to stream (for fast LANs)
1719 # if the server explicitly prefers to stream (for fast LANs)
1719 stream = remote.capable('stream-preferred')
1720 stream = remote.capable('stream-preferred')
1720
1721
1721 if stream and not heads:
1722 if stream and not heads:
1722 # 'stream' means remote revlog format is revlogv1 only
1723 # 'stream' means remote revlog format is revlogv1 only
1723 if remote.capable('stream'):
1724 if remote.capable('stream'):
1724 return self.stream_in(remote, set(('revlogv1',)))
1725 return self.stream_in(remote, set(('revlogv1',)))
1725 # otherwise, 'streamreqs' contains the remote revlog format
1726 # otherwise, 'streamreqs' contains the remote revlog format
1726 streamreqs = remote.capable('streamreqs')
1727 streamreqs = remote.capable('streamreqs')
1727 if streamreqs:
1728 if streamreqs:
1728 streamreqs = set(streamreqs.split(','))
1729 streamreqs = set(streamreqs.split(','))
1729 # if we support it, stream in and adjust our requirements
1730 # if we support it, stream in and adjust our requirements
1730 if not streamreqs - self.supportedformats:
1731 if not streamreqs - self.supportedformats:
1731 return self.stream_in(remote, streamreqs)
1732 return self.stream_in(remote, streamreqs)
1732
1733
1733 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1734 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1734 try:
1735 try:
1735 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1736 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1736 ret = exchange.pull(self, remote, heads).cgresult
1737 ret = exchange.pull(self, remote, heads).cgresult
1737 finally:
1738 finally:
1738 self.ui.restoreconfig(quiet)
1739 self.ui.restoreconfig(quiet)
1739 return ret
1740 return ret
1740
1741
    def pushkey(self, namespace, key, old, new):
        '''Push a pushkey update: set *key* from *old* to *new* in
        *namespace*.

        The 'prepushkey' hook runs first with throw=True, so a failing
        hook aborts the operation; the 'pushkey' hook runs afterwards
        with the result.  Returns the pushkey.push() result.
        '''
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
1749
1750
    def listkeys(self, namespace):
        '''Return the pushkey key/value pairs for *namespace*.

        The 'prelistkeys' hook runs first with throw=True, so a failing
        hook aborts the operation; the 'listkeys' hook runs afterwards
        with the resulting values.
        '''
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1756
1757
1757 def debugwireargs(self, one, two, three=None, four=None, five=None):
1758 def debugwireargs(self, one, two, three=None, four=None, five=None):
1758 '''used to test argument passing over the wire'''
1759 '''used to test argument passing over the wire'''
1759 return "%s %s %s %s %s" % (one, two, three, four, five)
1760 return "%s %s %s %s %s" % (one, two, three, four, five)
1760
1761
    def savecommitmessage(self, text):
        '''Store *text* in .hg/last-message.txt and return the file's
        path relative to the repository root.'''
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        # fp.name is absolute; strip the repo root (plus separator) to
        # get a root-relative path
        return self.pathto(fp.name[len(self.root) + 1:])
1768
1769
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the given (vfs, src, dest) renames.

    A rename whose source does not exist (OSError) is silently skipped.
    """
    pending = [tuple(entry) for entry in files]
    def a():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
1779
1780
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # replace only the leading 'journal' prefix, never a later occurrence
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1784
1785
def instance(ui, path, create):
    '''Create a localrepository for *path*.

    The path is passed through util.urllocalpath first — presumably to
    turn a file:// URL into a plain local path; confirm against util.
    '''
    return localrepository(ui, util.urllocalpath(path), create)
1787
1788
def islocal(path):
    '''Report whether this repository type is local (always True here).'''
    return True
General Comments 0
You need to be logged in to leave comments. Login now