##// END OF EJS Templates
bundle2: use discard to remove bundle2 cap...
Durham Goode -
r20963:ffddabb8 default
parent child Browse files
Show More
@@ -1,1876 +1,1876 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock as lockmod
11 import lock as lockmod
12 import transaction, store, encoding, exchange
12 import transaction, store, encoding, exchange
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 import branchmap, pathutil
19 import branchmap, pathutil
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21 filecache = scmutil.filecache
21 filecache = scmutil.filecache
22
22
23 class repofilecache(filecache):
23 class repofilecache(filecache):
24 """All filecache usage on repo are done for logic that should be unfiltered
24 """All filecache usage on repo are done for logic that should be unfiltered
25 """
25 """
26
26
27 def __get__(self, repo, type=None):
27 def __get__(self, repo, type=None):
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 def __set__(self, repo, value):
29 def __set__(self, repo, value):
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 def __delete__(self, repo):
31 def __delete__(self, repo):
32 return super(repofilecache, self).__delete__(repo.unfiltered())
32 return super(repofilecache, self).__delete__(repo.unfiltered())
33
33
34 class storecache(repofilecache):
34 class storecache(repofilecache):
35 """filecache for files in the store"""
35 """filecache for files in the store"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj.sjoin(fname)
37 return obj.sjoin(fname)
38
38
39 class unfilteredpropertycache(propertycache):
39 class unfilteredpropertycache(propertycache):
40 """propertycache that apply to unfiltered repo only"""
40 """propertycache that apply to unfiltered repo only"""
41
41
42 def __get__(self, repo, type=None):
42 def __get__(self, repo, type=None):
43 unfi = repo.unfiltered()
43 unfi = repo.unfiltered()
44 if unfi is repo:
44 if unfi is repo:
45 return super(unfilteredpropertycache, self).__get__(unfi)
45 return super(unfilteredpropertycache, self).__get__(unfi)
46 return getattr(unfi, self.name)
46 return getattr(unfi, self.name)
47
47
48 class filteredpropertycache(propertycache):
48 class filteredpropertycache(propertycache):
49 """propertycache that must take filtering in account"""
49 """propertycache that must take filtering in account"""
50
50
51 def cachevalue(self, obj, value):
51 def cachevalue(self, obj, value):
52 object.__setattr__(obj, self.name, value)
52 object.__setattr__(obj, self.name, value)
53
53
54
54
55 def hasunfilteredcache(repo, name):
55 def hasunfilteredcache(repo, name):
56 """check if a repo has an unfilteredpropertycache value for <name>"""
56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 return name in vars(repo.unfiltered())
57 return name in vars(repo.unfiltered())
58
58
59 def unfilteredmethod(orig):
59 def unfilteredmethod(orig):
60 """decorate method that always need to be run on unfiltered version"""
60 """decorate method that always need to be run on unfiltered version"""
61 def wrapper(repo, *args, **kwargs):
61 def wrapper(repo, *args, **kwargs):
62 return orig(repo.unfiltered(), *args, **kwargs)
62 return orig(repo.unfiltered(), *args, **kwargs)
63 return wrapper
63 return wrapper
64
64
65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 'bundle2'))
66 'bundle2'))
67 legacycaps = moderncaps.union(set(['changegroupsubset']))
67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68
68
69 class localpeer(peer.peerrepository):
69 class localpeer(peer.peerrepository):
70 '''peer for a local repo; reflects only the most recent API'''
70 '''peer for a local repo; reflects only the most recent API'''
71
71
72 def __init__(self, repo, caps=moderncaps):
72 def __init__(self, repo, caps=moderncaps):
73 peer.peerrepository.__init__(self)
73 peer.peerrepository.__init__(self)
74 self._repo = repo.filtered('served')
74 self._repo = repo.filtered('served')
75 self.ui = repo.ui
75 self.ui = repo.ui
76 self._caps = repo._restrictcapabilities(caps)
76 self._caps = repo._restrictcapabilities(caps)
77 self.requirements = repo.requirements
77 self.requirements = repo.requirements
78 self.supportedformats = repo.supportedformats
78 self.supportedformats = repo.supportedformats
79
79
80 def close(self):
80 def close(self):
81 self._repo.close()
81 self._repo.close()
82
82
83 def _capabilities(self):
83 def _capabilities(self):
84 return self._caps
84 return self._caps
85
85
86 def local(self):
86 def local(self):
87 return self._repo
87 return self._repo
88
88
89 def canpush(self):
89 def canpush(self):
90 return True
90 return True
91
91
92 def url(self):
92 def url(self):
93 return self._repo.url()
93 return self._repo.url()
94
94
95 def lookup(self, key):
95 def lookup(self, key):
96 return self._repo.lookup(key)
96 return self._repo.lookup(key)
97
97
98 def branchmap(self):
98 def branchmap(self):
99 return self._repo.branchmap()
99 return self._repo.branchmap()
100
100
101 def heads(self):
101 def heads(self):
102 return self._repo.heads()
102 return self._repo.heads()
103
103
104 def known(self, nodes):
104 def known(self, nodes):
105 return self._repo.known(nodes)
105 return self._repo.known(nodes)
106
106
107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 format='HG10'):
108 format='HG10'):
109 return exchange.getbundle(self._repo, source, heads=heads,
109 return exchange.getbundle(self._repo, source, heads=heads,
110 common=common, bundlecaps=bundlecaps)
110 common=common, bundlecaps=bundlecaps)
111
111
112 # TODO We might want to move the next two calls into legacypeer and add
112 # TODO We might want to move the next two calls into legacypeer and add
113 # unbundle instead.
113 # unbundle instead.
114
114
115 def lock(self):
115 def lock(self):
116 return self._repo.lock()
116 return self._repo.lock()
117
117
118 def addchangegroup(self, cg, source, url):
118 def addchangegroup(self, cg, source, url):
119 return changegroup.addchangegroup(self._repo, cg, source, url)
119 return changegroup.addchangegroup(self._repo, cg, source, url)
120
120
121 def pushkey(self, namespace, key, old, new):
121 def pushkey(self, namespace, key, old, new):
122 return self._repo.pushkey(namespace, key, old, new)
122 return self._repo.pushkey(namespace, key, old, new)
123
123
124 def listkeys(self, namespace):
124 def listkeys(self, namespace):
125 return self._repo.listkeys(namespace)
125 return self._repo.listkeys(namespace)
126
126
127 def debugwireargs(self, one, two, three=None, four=None, five=None):
127 def debugwireargs(self, one, two, three=None, four=None, five=None):
128 '''used to test argument passing over the wire'''
128 '''used to test argument passing over the wire'''
129 return "%s %s %s %s %s" % (one, two, three, four, five)
129 return "%s %s %s %s %s" % (one, two, three, four, five)
130
130
131 class locallegacypeer(localpeer):
131 class locallegacypeer(localpeer):
132 '''peer extension which implements legacy methods too; used for tests with
132 '''peer extension which implements legacy methods too; used for tests with
133 restricted capabilities'''
133 restricted capabilities'''
134
134
135 def __init__(self, repo):
135 def __init__(self, repo):
136 localpeer.__init__(self, repo, caps=legacycaps)
136 localpeer.__init__(self, repo, caps=legacycaps)
137
137
138 def branches(self, nodes):
138 def branches(self, nodes):
139 return self._repo.branches(nodes)
139 return self._repo.branches(nodes)
140
140
141 def between(self, pairs):
141 def between(self, pairs):
142 return self._repo.between(pairs)
142 return self._repo.between(pairs)
143
143
144 def changegroup(self, basenodes, source):
144 def changegroup(self, basenodes, source):
145 return changegroup.changegroup(self._repo, basenodes, source)
145 return changegroup.changegroup(self._repo, basenodes, source)
146
146
147 def changegroupsubset(self, bases, heads, source):
147 def changegroupsubset(self, bases, heads, source):
148 return changegroup.changegroupsubset(self._repo, bases, heads, source)
148 return changegroup.changegroupsubset(self._repo, bases, heads, source)
149
149
150 class localrepository(object):
150 class localrepository(object):
151
151
152 supportedformats = set(('revlogv1', 'generaldelta'))
152 supportedformats = set(('revlogv1', 'generaldelta'))
153 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
153 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
154 'dotencode'))
154 'dotencode'))
155 openerreqs = set(('revlogv1', 'generaldelta'))
155 openerreqs = set(('revlogv1', 'generaldelta'))
156 requirements = ['revlogv1']
156 requirements = ['revlogv1']
157 filtername = None
157 filtername = None
158
158
159 # a list of (ui, featureset) functions.
159 # a list of (ui, featureset) functions.
160 # only functions defined in module of enabled extensions are invoked
160 # only functions defined in module of enabled extensions are invoked
161 featuresetupfuncs = set()
161 featuresetupfuncs = set()
162
162
163 def _baserequirements(self, create):
163 def _baserequirements(self, create):
164 return self.requirements[:]
164 return self.requirements[:]
165
165
166 def __init__(self, baseui, path=None, create=False):
166 def __init__(self, baseui, path=None, create=False):
167 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
167 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
168 self.wopener = self.wvfs
168 self.wopener = self.wvfs
169 self.root = self.wvfs.base
169 self.root = self.wvfs.base
170 self.path = self.wvfs.join(".hg")
170 self.path = self.wvfs.join(".hg")
171 self.origroot = path
171 self.origroot = path
172 self.auditor = pathutil.pathauditor(self.root, self._checknested)
172 self.auditor = pathutil.pathauditor(self.root, self._checknested)
173 self.vfs = scmutil.vfs(self.path)
173 self.vfs = scmutil.vfs(self.path)
174 self.opener = self.vfs
174 self.opener = self.vfs
175 self.baseui = baseui
175 self.baseui = baseui
176 self.ui = baseui.copy()
176 self.ui = baseui.copy()
177 self.ui.copy = baseui.copy # prevent copying repo configuration
177 self.ui.copy = baseui.copy # prevent copying repo configuration
178 # A list of callback to shape the phase if no data were found.
178 # A list of callback to shape the phase if no data were found.
179 # Callback are in the form: func(repo, roots) --> processed root.
179 # Callback are in the form: func(repo, roots) --> processed root.
180 # This list it to be filled by extension during repo setup
180 # This list it to be filled by extension during repo setup
181 self._phasedefaults = []
181 self._phasedefaults = []
182 try:
182 try:
183 self.ui.readconfig(self.join("hgrc"), self.root)
183 self.ui.readconfig(self.join("hgrc"), self.root)
184 extensions.loadall(self.ui)
184 extensions.loadall(self.ui)
185 except IOError:
185 except IOError:
186 pass
186 pass
187
187
188 if self.featuresetupfuncs:
188 if self.featuresetupfuncs:
189 self.supported = set(self._basesupported) # use private copy
189 self.supported = set(self._basesupported) # use private copy
190 extmods = set(m.__name__ for n, m
190 extmods = set(m.__name__ for n, m
191 in extensions.extensions(self.ui))
191 in extensions.extensions(self.ui))
192 for setupfunc in self.featuresetupfuncs:
192 for setupfunc in self.featuresetupfuncs:
193 if setupfunc.__module__ in extmods:
193 if setupfunc.__module__ in extmods:
194 setupfunc(self.ui, self.supported)
194 setupfunc(self.ui, self.supported)
195 else:
195 else:
196 self.supported = self._basesupported
196 self.supported = self._basesupported
197
197
198 if not self.vfs.isdir():
198 if not self.vfs.isdir():
199 if create:
199 if create:
200 if not self.wvfs.exists():
200 if not self.wvfs.exists():
201 self.wvfs.makedirs()
201 self.wvfs.makedirs()
202 self.vfs.makedir(notindexed=True)
202 self.vfs.makedir(notindexed=True)
203 requirements = self._baserequirements(create)
203 requirements = self._baserequirements(create)
204 if self.ui.configbool('format', 'usestore', True):
204 if self.ui.configbool('format', 'usestore', True):
205 self.vfs.mkdir("store")
205 self.vfs.mkdir("store")
206 requirements.append("store")
206 requirements.append("store")
207 if self.ui.configbool('format', 'usefncache', True):
207 if self.ui.configbool('format', 'usefncache', True):
208 requirements.append("fncache")
208 requirements.append("fncache")
209 if self.ui.configbool('format', 'dotencode', True):
209 if self.ui.configbool('format', 'dotencode', True):
210 requirements.append('dotencode')
210 requirements.append('dotencode')
211 # create an invalid changelog
211 # create an invalid changelog
212 self.vfs.append(
212 self.vfs.append(
213 "00changelog.i",
213 "00changelog.i",
214 '\0\0\0\2' # represents revlogv2
214 '\0\0\0\2' # represents revlogv2
215 ' dummy changelog to prevent using the old repo layout'
215 ' dummy changelog to prevent using the old repo layout'
216 )
216 )
217 if self.ui.configbool('format', 'generaldelta', False):
217 if self.ui.configbool('format', 'generaldelta', False):
218 requirements.append("generaldelta")
218 requirements.append("generaldelta")
219 requirements = set(requirements)
219 requirements = set(requirements)
220 else:
220 else:
221 raise error.RepoError(_("repository %s not found") % path)
221 raise error.RepoError(_("repository %s not found") % path)
222 elif create:
222 elif create:
223 raise error.RepoError(_("repository %s already exists") % path)
223 raise error.RepoError(_("repository %s already exists") % path)
224 else:
224 else:
225 try:
225 try:
226 requirements = scmutil.readrequires(self.vfs, self.supported)
226 requirements = scmutil.readrequires(self.vfs, self.supported)
227 except IOError, inst:
227 except IOError, inst:
228 if inst.errno != errno.ENOENT:
228 if inst.errno != errno.ENOENT:
229 raise
229 raise
230 requirements = set()
230 requirements = set()
231
231
232 self.sharedpath = self.path
232 self.sharedpath = self.path
233 try:
233 try:
234 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
234 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
235 realpath=True)
235 realpath=True)
236 s = vfs.base
236 s = vfs.base
237 if not vfs.exists():
237 if not vfs.exists():
238 raise error.RepoError(
238 raise error.RepoError(
239 _('.hg/sharedpath points to nonexistent directory %s') % s)
239 _('.hg/sharedpath points to nonexistent directory %s') % s)
240 self.sharedpath = s
240 self.sharedpath = s
241 except IOError, inst:
241 except IOError, inst:
242 if inst.errno != errno.ENOENT:
242 if inst.errno != errno.ENOENT:
243 raise
243 raise
244
244
245 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
245 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
246 self.spath = self.store.path
246 self.spath = self.store.path
247 self.svfs = self.store.vfs
247 self.svfs = self.store.vfs
248 self.sopener = self.svfs
248 self.sopener = self.svfs
249 self.sjoin = self.store.join
249 self.sjoin = self.store.join
250 self.vfs.createmode = self.store.createmode
250 self.vfs.createmode = self.store.createmode
251 self._applyrequirements(requirements)
251 self._applyrequirements(requirements)
252 if create:
252 if create:
253 self._writerequirements()
253 self._writerequirements()
254
254
255
255
256 self._branchcaches = {}
256 self._branchcaches = {}
257 self.filterpats = {}
257 self.filterpats = {}
258 self._datafilters = {}
258 self._datafilters = {}
259 self._transref = self._lockref = self._wlockref = None
259 self._transref = self._lockref = self._wlockref = None
260
260
261 # A cache for various files under .hg/ that tracks file changes,
261 # A cache for various files under .hg/ that tracks file changes,
262 # (used by the filecache decorator)
262 # (used by the filecache decorator)
263 #
263 #
264 # Maps a property name to its util.filecacheentry
264 # Maps a property name to its util.filecacheentry
265 self._filecache = {}
265 self._filecache = {}
266
266
267 # hold sets of revision to be filtered
267 # hold sets of revision to be filtered
268 # should be cleared when something might have changed the filter value:
268 # should be cleared when something might have changed the filter value:
269 # - new changesets,
269 # - new changesets,
270 # - phase change,
270 # - phase change,
271 # - new obsolescence marker,
271 # - new obsolescence marker,
272 # - working directory parent change,
272 # - working directory parent change,
273 # - bookmark changes
273 # - bookmark changes
274 self.filteredrevcache = {}
274 self.filteredrevcache = {}
275
275
276 def close(self):
276 def close(self):
277 pass
277 pass
278
278
279 def _restrictcapabilities(self, caps):
279 def _restrictcapabilities(self, caps):
280 # bundle2 is not ready for prime time, drop it unless explicitly
280 # bundle2 is not ready for prime time, drop it unless explicitly
281 # required by the tests (or some brave tester)
281 # required by the tests (or some brave tester)
282 if not self.ui.configbool('server', 'bundle2', False):
282 if not self.ui.configbool('server', 'bundle2', False):
283 caps = set(caps)
283 caps = set(caps)
284 caps.remove('bundle2')
284 caps.discard('bundle2')
285 return caps
285 return caps
286
286
287 def _applyrequirements(self, requirements):
287 def _applyrequirements(self, requirements):
288 self.requirements = requirements
288 self.requirements = requirements
289 self.sopener.options = dict((r, 1) for r in requirements
289 self.sopener.options = dict((r, 1) for r in requirements
290 if r in self.openerreqs)
290 if r in self.openerreqs)
291 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
291 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
292 if chunkcachesize is not None:
292 if chunkcachesize is not None:
293 self.sopener.options['chunkcachesize'] = chunkcachesize
293 self.sopener.options['chunkcachesize'] = chunkcachesize
294
294
295 def _writerequirements(self):
295 def _writerequirements(self):
296 reqfile = self.opener("requires", "w")
296 reqfile = self.opener("requires", "w")
297 for r in sorted(self.requirements):
297 for r in sorted(self.requirements):
298 reqfile.write("%s\n" % r)
298 reqfile.write("%s\n" % r)
299 reqfile.close()
299 reqfile.close()
300
300
301 def _checknested(self, path):
301 def _checknested(self, path):
302 """Determine if path is a legal nested repository."""
302 """Determine if path is a legal nested repository."""
303 if not path.startswith(self.root):
303 if not path.startswith(self.root):
304 return False
304 return False
305 subpath = path[len(self.root) + 1:]
305 subpath = path[len(self.root) + 1:]
306 normsubpath = util.pconvert(subpath)
306 normsubpath = util.pconvert(subpath)
307
307
308 # XXX: Checking against the current working copy is wrong in
308 # XXX: Checking against the current working copy is wrong in
309 # the sense that it can reject things like
309 # the sense that it can reject things like
310 #
310 #
311 # $ hg cat -r 10 sub/x.txt
311 # $ hg cat -r 10 sub/x.txt
312 #
312 #
313 # if sub/ is no longer a subrepository in the working copy
313 # if sub/ is no longer a subrepository in the working copy
314 # parent revision.
314 # parent revision.
315 #
315 #
316 # However, it can of course also allow things that would have
316 # However, it can of course also allow things that would have
317 # been rejected before, such as the above cat command if sub/
317 # been rejected before, such as the above cat command if sub/
318 # is a subrepository now, but was a normal directory before.
318 # is a subrepository now, but was a normal directory before.
319 # The old path auditor would have rejected by mistake since it
319 # The old path auditor would have rejected by mistake since it
320 # panics when it sees sub/.hg/.
320 # panics when it sees sub/.hg/.
321 #
321 #
322 # All in all, checking against the working copy seems sensible
322 # All in all, checking against the working copy seems sensible
323 # since we want to prevent access to nested repositories on
323 # since we want to prevent access to nested repositories on
324 # the filesystem *now*.
324 # the filesystem *now*.
325 ctx = self[None]
325 ctx = self[None]
326 parts = util.splitpath(subpath)
326 parts = util.splitpath(subpath)
327 while parts:
327 while parts:
328 prefix = '/'.join(parts)
328 prefix = '/'.join(parts)
329 if prefix in ctx.substate:
329 if prefix in ctx.substate:
330 if prefix == normsubpath:
330 if prefix == normsubpath:
331 return True
331 return True
332 else:
332 else:
333 sub = ctx.sub(prefix)
333 sub = ctx.sub(prefix)
334 return sub.checknested(subpath[len(prefix) + 1:])
334 return sub.checknested(subpath[len(prefix) + 1:])
335 else:
335 else:
336 parts.pop()
336 parts.pop()
337 return False
337 return False
338
338
339 def peer(self):
339 def peer(self):
340 return localpeer(self) # not cached to avoid reference cycle
340 return localpeer(self) # not cached to avoid reference cycle
341
341
342 def unfiltered(self):
342 def unfiltered(self):
343 """Return unfiltered version of the repository
343 """Return unfiltered version of the repository
344
344
345 Intended to be overwritten by filtered repo."""
345 Intended to be overwritten by filtered repo."""
346 return self
346 return self
347
347
348 def filtered(self, name):
348 def filtered(self, name):
349 """Return a filtered version of a repository"""
349 """Return a filtered version of a repository"""
350 # build a new class with the mixin and the current class
350 # build a new class with the mixin and the current class
351 # (possibly subclass of the repo)
351 # (possibly subclass of the repo)
352 class proxycls(repoview.repoview, self.unfiltered().__class__):
352 class proxycls(repoview.repoview, self.unfiltered().__class__):
353 pass
353 pass
354 return proxycls(self, name)
354 return proxycls(self, name)
355
355
356 @repofilecache('bookmarks')
356 @repofilecache('bookmarks')
357 def _bookmarks(self):
357 def _bookmarks(self):
358 return bookmarks.bmstore(self)
358 return bookmarks.bmstore(self)
359
359
360 @repofilecache('bookmarks.current')
360 @repofilecache('bookmarks.current')
361 def _bookmarkcurrent(self):
361 def _bookmarkcurrent(self):
362 return bookmarks.readcurrent(self)
362 return bookmarks.readcurrent(self)
363
363
364 def bookmarkheads(self, bookmark):
364 def bookmarkheads(self, bookmark):
365 name = bookmark.split('@', 1)[0]
365 name = bookmark.split('@', 1)[0]
366 heads = []
366 heads = []
367 for mark, n in self._bookmarks.iteritems():
367 for mark, n in self._bookmarks.iteritems():
368 if mark.split('@', 1)[0] == name:
368 if mark.split('@', 1)[0] == name:
369 heads.append(n)
369 heads.append(n)
370 return heads
370 return heads
371
371
372 @storecache('phaseroots')
372 @storecache('phaseroots')
373 def _phasecache(self):
373 def _phasecache(self):
374 return phases.phasecache(self, self._phasedefaults)
374 return phases.phasecache(self, self._phasedefaults)
375
375
376 @storecache('obsstore')
376 @storecache('obsstore')
377 def obsstore(self):
377 def obsstore(self):
378 store = obsolete.obsstore(self.sopener)
378 store = obsolete.obsstore(self.sopener)
379 if store and not obsolete._enabled:
379 if store and not obsolete._enabled:
380 # message is rare enough to not be translated
380 # message is rare enough to not be translated
381 msg = 'obsolete feature not enabled but %i markers found!\n'
381 msg = 'obsolete feature not enabled but %i markers found!\n'
382 self.ui.warn(msg % len(list(store)))
382 self.ui.warn(msg % len(list(store)))
383 return store
383 return store
384
384
385 @storecache('00changelog.i')
385 @storecache('00changelog.i')
386 def changelog(self):
386 def changelog(self):
387 c = changelog.changelog(self.sopener)
387 c = changelog.changelog(self.sopener)
388 if 'HG_PENDING' in os.environ:
388 if 'HG_PENDING' in os.environ:
389 p = os.environ['HG_PENDING']
389 p = os.environ['HG_PENDING']
390 if p.startswith(self.root):
390 if p.startswith(self.root):
391 c.readpending('00changelog.i.a')
391 c.readpending('00changelog.i.a')
392 return c
392 return c
393
393
394 @storecache('00manifest.i')
394 @storecache('00manifest.i')
395 def manifest(self):
395 def manifest(self):
396 return manifest.manifest(self.sopener)
396 return manifest.manifest(self.sopener)
397
397
398 @repofilecache('dirstate')
398 @repofilecache('dirstate')
399 def dirstate(self):
399 def dirstate(self):
400 warned = [0]
400 warned = [0]
401 def validate(node):
401 def validate(node):
402 try:
402 try:
403 self.changelog.rev(node)
403 self.changelog.rev(node)
404 return node
404 return node
405 except error.LookupError:
405 except error.LookupError:
406 if not warned[0]:
406 if not warned[0]:
407 warned[0] = True
407 warned[0] = True
408 self.ui.warn(_("warning: ignoring unknown"
408 self.ui.warn(_("warning: ignoring unknown"
409 " working parent %s!\n") % short(node))
409 " working parent %s!\n") % short(node))
410 return nullid
410 return nullid
411
411
412 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
412 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
413
413
414 def __getitem__(self, changeid):
414 def __getitem__(self, changeid):
415 if changeid is None:
415 if changeid is None:
416 return context.workingctx(self)
416 return context.workingctx(self)
417 return context.changectx(self, changeid)
417 return context.changectx(self, changeid)
418
418
419 def __contains__(self, changeid):
419 def __contains__(self, changeid):
420 try:
420 try:
421 return bool(self.lookup(changeid))
421 return bool(self.lookup(changeid))
422 except error.RepoLookupError:
422 except error.RepoLookupError:
423 return False
423 return False
424
424
425 def __nonzero__(self):
425 def __nonzero__(self):
426 return True
426 return True
427
427
428 def __len__(self):
428 def __len__(self):
429 return len(self.changelog)
429 return len(self.changelog)
430
430
431 def __iter__(self):
431 def __iter__(self):
432 return iter(self.changelog)
432 return iter(self.changelog)
433
433
434 def revs(self, expr, *args):
434 def revs(self, expr, *args):
435 '''Return a list of revisions matching the given revset'''
435 '''Return a list of revisions matching the given revset'''
436 expr = revset.formatspec(expr, *args)
436 expr = revset.formatspec(expr, *args)
437 m = revset.match(None, expr)
437 m = revset.match(None, expr)
438 return m(self, revset.spanset(self))
438 return m(self, revset.spanset(self))
439
439
440 def set(self, expr, *args):
440 def set(self, expr, *args):
441 '''
441 '''
442 Yield a context for each matching revision, after doing arg
442 Yield a context for each matching revision, after doing arg
443 replacement via revset.formatspec
443 replacement via revset.formatspec
444 '''
444 '''
445 for r in self.revs(expr, *args):
445 for r in self.revs(expr, *args):
446 yield self[r]
446 yield self[r]
447
447
448 def url(self):
448 def url(self):
449 return 'file:' + self.root
449 return 'file:' + self.root
450
450
451 def hook(self, name, throw=False, **args):
451 def hook(self, name, throw=False, **args):
452 return hook.hook(self.ui, self, name, throw, **args)
452 return hook.hook(self.ui, self, name, throw, **args)
453
453
454 @unfilteredmethod
454 @unfilteredmethod
455 def _tag(self, names, node, message, local, user, date, extra={}):
455 def _tag(self, names, node, message, local, user, date, extra={}):
456 if isinstance(names, str):
456 if isinstance(names, str):
457 names = (names,)
457 names = (names,)
458
458
459 branches = self.branchmap()
459 branches = self.branchmap()
460 for name in names:
460 for name in names:
461 self.hook('pretag', throw=True, node=hex(node), tag=name,
461 self.hook('pretag', throw=True, node=hex(node), tag=name,
462 local=local)
462 local=local)
463 if name in branches:
463 if name in branches:
464 self.ui.warn(_("warning: tag %s conflicts with existing"
464 self.ui.warn(_("warning: tag %s conflicts with existing"
465 " branch name\n") % name)
465 " branch name\n") % name)
466
466
467 def writetags(fp, names, munge, prevtags):
467 def writetags(fp, names, munge, prevtags):
468 fp.seek(0, 2)
468 fp.seek(0, 2)
469 if prevtags and prevtags[-1] != '\n':
469 if prevtags and prevtags[-1] != '\n':
470 fp.write('\n')
470 fp.write('\n')
471 for name in names:
471 for name in names:
472 m = munge and munge(name) or name
472 m = munge and munge(name) or name
473 if (self._tagscache.tagtypes and
473 if (self._tagscache.tagtypes and
474 name in self._tagscache.tagtypes):
474 name in self._tagscache.tagtypes):
475 old = self.tags().get(name, nullid)
475 old = self.tags().get(name, nullid)
476 fp.write('%s %s\n' % (hex(old), m))
476 fp.write('%s %s\n' % (hex(old), m))
477 fp.write('%s %s\n' % (hex(node), m))
477 fp.write('%s %s\n' % (hex(node), m))
478 fp.close()
478 fp.close()
479
479
480 prevtags = ''
480 prevtags = ''
481 if local:
481 if local:
482 try:
482 try:
483 fp = self.opener('localtags', 'r+')
483 fp = self.opener('localtags', 'r+')
484 except IOError:
484 except IOError:
485 fp = self.opener('localtags', 'a')
485 fp = self.opener('localtags', 'a')
486 else:
486 else:
487 prevtags = fp.read()
487 prevtags = fp.read()
488
488
489 # local tags are stored in the current charset
489 # local tags are stored in the current charset
490 writetags(fp, names, None, prevtags)
490 writetags(fp, names, None, prevtags)
491 for name in names:
491 for name in names:
492 self.hook('tag', node=hex(node), tag=name, local=local)
492 self.hook('tag', node=hex(node), tag=name, local=local)
493 return
493 return
494
494
495 try:
495 try:
496 fp = self.wfile('.hgtags', 'rb+')
496 fp = self.wfile('.hgtags', 'rb+')
497 except IOError, e:
497 except IOError, e:
498 if e.errno != errno.ENOENT:
498 if e.errno != errno.ENOENT:
499 raise
499 raise
500 fp = self.wfile('.hgtags', 'ab')
500 fp = self.wfile('.hgtags', 'ab')
501 else:
501 else:
502 prevtags = fp.read()
502 prevtags = fp.read()
503
503
504 # committed tags are stored in UTF-8
504 # committed tags are stored in UTF-8
505 writetags(fp, names, encoding.fromlocal, prevtags)
505 writetags(fp, names, encoding.fromlocal, prevtags)
506
506
507 fp.close()
507 fp.close()
508
508
509 self.invalidatecaches()
509 self.invalidatecaches()
510
510
511 if '.hgtags' not in self.dirstate:
511 if '.hgtags' not in self.dirstate:
512 self[None].add(['.hgtags'])
512 self[None].add(['.hgtags'])
513
513
514 m = matchmod.exact(self.root, '', ['.hgtags'])
514 m = matchmod.exact(self.root, '', ['.hgtags'])
515 tagnode = self.commit(message, user, date, extra=extra, match=m)
515 tagnode = self.commit(message, user, date, extra=extra, match=m)
516
516
517 for name in names:
517 for name in names:
518 self.hook('tag', node=hex(node), tag=name, local=local)
518 self.hook('tag', node=hex(node), tag=name, local=local)
519
519
520 return tagnode
520 return tagnode
521
521
522 def tag(self, names, node, message, local, user, date):
522 def tag(self, names, node, message, local, user, date):
523 '''tag a revision with one or more symbolic names.
523 '''tag a revision with one or more symbolic names.
524
524
525 names is a list of strings or, when adding a single tag, names may be a
525 names is a list of strings or, when adding a single tag, names may be a
526 string.
526 string.
527
527
528 if local is True, the tags are stored in a per-repository file.
528 if local is True, the tags are stored in a per-repository file.
529 otherwise, they are stored in the .hgtags file, and a new
529 otherwise, they are stored in the .hgtags file, and a new
530 changeset is committed with the change.
530 changeset is committed with the change.
531
531
532 keyword arguments:
532 keyword arguments:
533
533
534 local: whether to store tags in non-version-controlled file
534 local: whether to store tags in non-version-controlled file
535 (default False)
535 (default False)
536
536
537 message: commit message to use if committing
537 message: commit message to use if committing
538
538
539 user: name of user to use if committing
539 user: name of user to use if committing
540
540
541 date: date tuple to use if committing'''
541 date: date tuple to use if committing'''
542
542
543 if not local:
543 if not local:
544 for x in self.status()[:5]:
544 for x in self.status()[:5]:
545 if '.hgtags' in x:
545 if '.hgtags' in x:
546 raise util.Abort(_('working copy of .hgtags is changed '
546 raise util.Abort(_('working copy of .hgtags is changed '
547 '(please commit .hgtags manually)'))
547 '(please commit .hgtags manually)'))
548
548
549 self.tags() # instantiate the cache
549 self.tags() # instantiate the cache
550 self._tag(names, node, message, local, user, date)
550 self._tag(names, node, message, local, user, date)
551
551
552 @filteredpropertycache
552 @filteredpropertycache
553 def _tagscache(self):
553 def _tagscache(self):
554 '''Returns a tagscache object that contains various tags related
554 '''Returns a tagscache object that contains various tags related
555 caches.'''
555 caches.'''
556
556
557 # This simplifies its cache management by having one decorated
557 # This simplifies its cache management by having one decorated
558 # function (this one) and the rest simply fetch things from it.
558 # function (this one) and the rest simply fetch things from it.
559 class tagscache(object):
559 class tagscache(object):
560 def __init__(self):
560 def __init__(self):
561 # These two define the set of tags for this repository. tags
561 # These two define the set of tags for this repository. tags
562 # maps tag name to node; tagtypes maps tag name to 'global' or
562 # maps tag name to node; tagtypes maps tag name to 'global' or
563 # 'local'. (Global tags are defined by .hgtags across all
563 # 'local'. (Global tags are defined by .hgtags across all
564 # heads, and local tags are defined in .hg/localtags.)
564 # heads, and local tags are defined in .hg/localtags.)
565 # They constitute the in-memory cache of tags.
565 # They constitute the in-memory cache of tags.
566 self.tags = self.tagtypes = None
566 self.tags = self.tagtypes = None
567
567
568 self.nodetagscache = self.tagslist = None
568 self.nodetagscache = self.tagslist = None
569
569
570 cache = tagscache()
570 cache = tagscache()
571 cache.tags, cache.tagtypes = self._findtags()
571 cache.tags, cache.tagtypes = self._findtags()
572
572
573 return cache
573 return cache
574
574
575 def tags(self):
575 def tags(self):
576 '''return a mapping of tag to node'''
576 '''return a mapping of tag to node'''
577 t = {}
577 t = {}
578 if self.changelog.filteredrevs:
578 if self.changelog.filteredrevs:
579 tags, tt = self._findtags()
579 tags, tt = self._findtags()
580 else:
580 else:
581 tags = self._tagscache.tags
581 tags = self._tagscache.tags
582 for k, v in tags.iteritems():
582 for k, v in tags.iteritems():
583 try:
583 try:
584 # ignore tags to unknown nodes
584 # ignore tags to unknown nodes
585 self.changelog.rev(v)
585 self.changelog.rev(v)
586 t[k] = v
586 t[k] = v
587 except (error.LookupError, ValueError):
587 except (error.LookupError, ValueError):
588 pass
588 pass
589 return t
589 return t
590
590
591 def _findtags(self):
591 def _findtags(self):
592 '''Do the hard work of finding tags. Return a pair of dicts
592 '''Do the hard work of finding tags. Return a pair of dicts
593 (tags, tagtypes) where tags maps tag name to node, and tagtypes
593 (tags, tagtypes) where tags maps tag name to node, and tagtypes
594 maps tag name to a string like \'global\' or \'local\'.
594 maps tag name to a string like \'global\' or \'local\'.
595 Subclasses or extensions are free to add their own tags, but
595 Subclasses or extensions are free to add their own tags, but
596 should be aware that the returned dicts will be retained for the
596 should be aware that the returned dicts will be retained for the
597 duration of the localrepo object.'''
597 duration of the localrepo object.'''
598
598
599 # XXX what tagtype should subclasses/extensions use? Currently
599 # XXX what tagtype should subclasses/extensions use? Currently
600 # mq and bookmarks add tags, but do not set the tagtype at all.
600 # mq and bookmarks add tags, but do not set the tagtype at all.
601 # Should each extension invent its own tag type? Should there
601 # Should each extension invent its own tag type? Should there
602 # be one tagtype for all such "virtual" tags? Or is the status
602 # be one tagtype for all such "virtual" tags? Or is the status
603 # quo fine?
603 # quo fine?
604
604
605 alltags = {} # map tag name to (node, hist)
605 alltags = {} # map tag name to (node, hist)
606 tagtypes = {}
606 tagtypes = {}
607
607
608 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
608 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
609 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
609 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
610
610
611 # Build the return dicts. Have to re-encode tag names because
611 # Build the return dicts. Have to re-encode tag names because
612 # the tags module always uses UTF-8 (in order not to lose info
612 # the tags module always uses UTF-8 (in order not to lose info
613 # writing to the cache), but the rest of Mercurial wants them in
613 # writing to the cache), but the rest of Mercurial wants them in
614 # local encoding.
614 # local encoding.
615 tags = {}
615 tags = {}
616 for (name, (node, hist)) in alltags.iteritems():
616 for (name, (node, hist)) in alltags.iteritems():
617 if node != nullid:
617 if node != nullid:
618 tags[encoding.tolocal(name)] = node
618 tags[encoding.tolocal(name)] = node
619 tags['tip'] = self.changelog.tip()
619 tags['tip'] = self.changelog.tip()
620 tagtypes = dict([(encoding.tolocal(name), value)
620 tagtypes = dict([(encoding.tolocal(name), value)
621 for (name, value) in tagtypes.iteritems()])
621 for (name, value) in tagtypes.iteritems()])
622 return (tags, tagtypes)
622 return (tags, tagtypes)
623
623
624 def tagtype(self, tagname):
624 def tagtype(self, tagname):
625 '''
625 '''
626 return the type of the given tag. result can be:
626 return the type of the given tag. result can be:
627
627
628 'local' : a local tag
628 'local' : a local tag
629 'global' : a global tag
629 'global' : a global tag
630 None : tag does not exist
630 None : tag does not exist
631 '''
631 '''
632
632
633 return self._tagscache.tagtypes.get(tagname)
633 return self._tagscache.tagtypes.get(tagname)
634
634
635 def tagslist(self):
635 def tagslist(self):
636 '''return a list of tags ordered by revision'''
636 '''return a list of tags ordered by revision'''
637 if not self._tagscache.tagslist:
637 if not self._tagscache.tagslist:
638 l = []
638 l = []
639 for t, n in self.tags().iteritems():
639 for t, n in self.tags().iteritems():
640 r = self.changelog.rev(n)
640 r = self.changelog.rev(n)
641 l.append((r, t, n))
641 l.append((r, t, n))
642 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
642 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
643
643
644 return self._tagscache.tagslist
644 return self._tagscache.tagslist
645
645
646 def nodetags(self, node):
646 def nodetags(self, node):
647 '''return the tags associated with a node'''
647 '''return the tags associated with a node'''
648 if not self._tagscache.nodetagscache:
648 if not self._tagscache.nodetagscache:
649 nodetagscache = {}
649 nodetagscache = {}
650 for t, n in self._tagscache.tags.iteritems():
650 for t, n in self._tagscache.tags.iteritems():
651 nodetagscache.setdefault(n, []).append(t)
651 nodetagscache.setdefault(n, []).append(t)
652 for tags in nodetagscache.itervalues():
652 for tags in nodetagscache.itervalues():
653 tags.sort()
653 tags.sort()
654 self._tagscache.nodetagscache = nodetagscache
654 self._tagscache.nodetagscache = nodetagscache
655 return self._tagscache.nodetagscache.get(node, [])
655 return self._tagscache.nodetagscache.get(node, [])
656
656
657 def nodebookmarks(self, node):
657 def nodebookmarks(self, node):
658 marks = []
658 marks = []
659 for bookmark, n in self._bookmarks.iteritems():
659 for bookmark, n in self._bookmarks.iteritems():
660 if n == node:
660 if n == node:
661 marks.append(bookmark)
661 marks.append(bookmark)
662 return sorted(marks)
662 return sorted(marks)
663
663
664 def branchmap(self):
664 def branchmap(self):
665 '''returns a dictionary {branch: [branchheads]} with branchheads
665 '''returns a dictionary {branch: [branchheads]} with branchheads
666 ordered by increasing revision number'''
666 ordered by increasing revision number'''
667 branchmap.updatecache(self)
667 branchmap.updatecache(self)
668 return self._branchcaches[self.filtername]
668 return self._branchcaches[self.filtername]
669
669
670 def branchtip(self, branch):
670 def branchtip(self, branch):
671 '''return the tip node for a given branch'''
671 '''return the tip node for a given branch'''
672 try:
672 try:
673 return self.branchmap().branchtip(branch)
673 return self.branchmap().branchtip(branch)
674 except KeyError:
674 except KeyError:
675 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
675 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
676
676
677 def lookup(self, key):
677 def lookup(self, key):
678 return self[key].node()
678 return self[key].node()
679
679
680 def lookupbranch(self, key, remote=None):
680 def lookupbranch(self, key, remote=None):
681 repo = remote or self
681 repo = remote or self
682 if key in repo.branchmap():
682 if key in repo.branchmap():
683 return key
683 return key
684
684
685 repo = (remote and remote.local()) and remote or self
685 repo = (remote and remote.local()) and remote or self
686 return repo[key].branch()
686 return repo[key].branch()
687
687
688 def known(self, nodes):
688 def known(self, nodes):
689 nm = self.changelog.nodemap
689 nm = self.changelog.nodemap
690 pc = self._phasecache
690 pc = self._phasecache
691 result = []
691 result = []
692 for n in nodes:
692 for n in nodes:
693 r = nm.get(n)
693 r = nm.get(n)
694 resp = not (r is None or pc.phase(self, r) >= phases.secret)
694 resp = not (r is None or pc.phase(self, r) >= phases.secret)
695 result.append(resp)
695 result.append(resp)
696 return result
696 return result
697
697
698 def local(self):
698 def local(self):
699 return self
699 return self
700
700
701 def cancopy(self):
701 def cancopy(self):
702 # so statichttprepo's override of local() works
702 # so statichttprepo's override of local() works
703 if not self.local():
703 if not self.local():
704 return False
704 return False
705 if not self.ui.configbool('phases', 'publish', True):
705 if not self.ui.configbool('phases', 'publish', True):
706 return True
706 return True
707 # if publishing we can't copy if there is filtered content
707 # if publishing we can't copy if there is filtered content
708 return not self.filtered('visible').changelog.filteredrevs
708 return not self.filtered('visible').changelog.filteredrevs
709
709
710 def join(self, f):
710 def join(self, f):
711 return os.path.join(self.path, f)
711 return os.path.join(self.path, f)
712
712
713 def wjoin(self, f):
713 def wjoin(self, f):
714 return os.path.join(self.root, f)
714 return os.path.join(self.root, f)
715
715
716 def file(self, f):
716 def file(self, f):
717 if f[0] == '/':
717 if f[0] == '/':
718 f = f[1:]
718 f = f[1:]
719 return filelog.filelog(self.sopener, f)
719 return filelog.filelog(self.sopener, f)
720
720
721 def changectx(self, changeid):
721 def changectx(self, changeid):
722 return self[changeid]
722 return self[changeid]
723
723
724 def parents(self, changeid=None):
724 def parents(self, changeid=None):
725 '''get list of changectxs for parents of changeid'''
725 '''get list of changectxs for parents of changeid'''
726 return self[changeid].parents()
726 return self[changeid].parents()
727
727
728 def setparents(self, p1, p2=nullid):
728 def setparents(self, p1, p2=nullid):
729 copies = self.dirstate.setparents(p1, p2)
729 copies = self.dirstate.setparents(p1, p2)
730 pctx = self[p1]
730 pctx = self[p1]
731 if copies:
731 if copies:
732 # Adjust copy records, the dirstate cannot do it, it
732 # Adjust copy records, the dirstate cannot do it, it
733 # requires access to parents manifests. Preserve them
733 # requires access to parents manifests. Preserve them
734 # only for entries added to first parent.
734 # only for entries added to first parent.
735 for f in copies:
735 for f in copies:
736 if f not in pctx and copies[f] in pctx:
736 if f not in pctx and copies[f] in pctx:
737 self.dirstate.copy(copies[f], f)
737 self.dirstate.copy(copies[f], f)
738 if p2 == nullid:
738 if p2 == nullid:
739 for f, s in sorted(self.dirstate.copies().items()):
739 for f, s in sorted(self.dirstate.copies().items()):
740 if f not in pctx and s not in pctx:
740 if f not in pctx and s not in pctx:
741 self.dirstate.copy(None, f)
741 self.dirstate.copy(None, f)
742
742
743 def filectx(self, path, changeid=None, fileid=None):
743 def filectx(self, path, changeid=None, fileid=None):
744 """changeid can be a changeset revision, node, or tag.
744 """changeid can be a changeset revision, node, or tag.
745 fileid can be a file revision or node."""
745 fileid can be a file revision or node."""
746 return context.filectx(self, path, changeid, fileid)
746 return context.filectx(self, path, changeid, fileid)
747
747
748 def getcwd(self):
748 def getcwd(self):
749 return self.dirstate.getcwd()
749 return self.dirstate.getcwd()
750
750
751 def pathto(self, f, cwd=None):
751 def pathto(self, f, cwd=None):
752 return self.dirstate.pathto(f, cwd)
752 return self.dirstate.pathto(f, cwd)
753
753
754 def wfile(self, f, mode='r'):
754 def wfile(self, f, mode='r'):
755 return self.wopener(f, mode)
755 return self.wopener(f, mode)
756
756
757 def _link(self, f):
757 def _link(self, f):
758 return self.wvfs.islink(f)
758 return self.wvfs.islink(f)
759
759
760 def _loadfilter(self, filter):
760 def _loadfilter(self, filter):
761 if filter not in self.filterpats:
761 if filter not in self.filterpats:
762 l = []
762 l = []
763 for pat, cmd in self.ui.configitems(filter):
763 for pat, cmd in self.ui.configitems(filter):
764 if cmd == '!':
764 if cmd == '!':
765 continue
765 continue
766 mf = matchmod.match(self.root, '', [pat])
766 mf = matchmod.match(self.root, '', [pat])
767 fn = None
767 fn = None
768 params = cmd
768 params = cmd
769 for name, filterfn in self._datafilters.iteritems():
769 for name, filterfn in self._datafilters.iteritems():
770 if cmd.startswith(name):
770 if cmd.startswith(name):
771 fn = filterfn
771 fn = filterfn
772 params = cmd[len(name):].lstrip()
772 params = cmd[len(name):].lstrip()
773 break
773 break
774 if not fn:
774 if not fn:
775 fn = lambda s, c, **kwargs: util.filter(s, c)
775 fn = lambda s, c, **kwargs: util.filter(s, c)
776 # Wrap old filters not supporting keyword arguments
776 # Wrap old filters not supporting keyword arguments
777 if not inspect.getargspec(fn)[2]:
777 if not inspect.getargspec(fn)[2]:
778 oldfn = fn
778 oldfn = fn
779 fn = lambda s, c, **kwargs: oldfn(s, c)
779 fn = lambda s, c, **kwargs: oldfn(s, c)
780 l.append((mf, fn, params))
780 l.append((mf, fn, params))
781 self.filterpats[filter] = l
781 self.filterpats[filter] = l
782 return self.filterpats[filter]
782 return self.filterpats[filter]
783
783
784 def _filter(self, filterpats, filename, data):
784 def _filter(self, filterpats, filename, data):
785 for mf, fn, cmd in filterpats:
785 for mf, fn, cmd in filterpats:
786 if mf(filename):
786 if mf(filename):
787 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
787 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
788 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
788 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
789 break
789 break
790
790
791 return data
791 return data
792
792
793 @unfilteredpropertycache
793 @unfilteredpropertycache
794 def _encodefilterpats(self):
794 def _encodefilterpats(self):
795 return self._loadfilter('encode')
795 return self._loadfilter('encode')
796
796
797 @unfilteredpropertycache
797 @unfilteredpropertycache
798 def _decodefilterpats(self):
798 def _decodefilterpats(self):
799 return self._loadfilter('decode')
799 return self._loadfilter('decode')
800
800
801 def adddatafilter(self, name, filter):
801 def adddatafilter(self, name, filter):
802 self._datafilters[name] = filter
802 self._datafilters[name] = filter
803
803
804 def wread(self, filename):
804 def wread(self, filename):
805 if self._link(filename):
805 if self._link(filename):
806 data = self.wvfs.readlink(filename)
806 data = self.wvfs.readlink(filename)
807 else:
807 else:
808 data = self.wopener.read(filename)
808 data = self.wopener.read(filename)
809 return self._filter(self._encodefilterpats, filename, data)
809 return self._filter(self._encodefilterpats, filename, data)
810
810
811 def wwrite(self, filename, data, flags):
811 def wwrite(self, filename, data, flags):
812 data = self._filter(self._decodefilterpats, filename, data)
812 data = self._filter(self._decodefilterpats, filename, data)
813 if 'l' in flags:
813 if 'l' in flags:
814 self.wopener.symlink(data, filename)
814 self.wopener.symlink(data, filename)
815 else:
815 else:
816 self.wopener.write(filename, data)
816 self.wopener.write(filename, data)
817 if 'x' in flags:
817 if 'x' in flags:
818 self.wvfs.setflags(filename, False, True)
818 self.wvfs.setflags(filename, False, True)
819
819
820 def wwritedata(self, filename, data):
820 def wwritedata(self, filename, data):
821 return self._filter(self._decodefilterpats, filename, data)
821 return self._filter(self._decodefilterpats, filename, data)
822
822
823 def transaction(self, desc, report=None):
823 def transaction(self, desc, report=None):
824 tr = self._transref and self._transref() or None
824 tr = self._transref and self._transref() or None
825 if tr and tr.running():
825 if tr and tr.running():
826 return tr.nest()
826 return tr.nest()
827
827
828 # abort here if the journal already exists
828 # abort here if the journal already exists
829 if self.svfs.exists("journal"):
829 if self.svfs.exists("journal"):
830 raise error.RepoError(
830 raise error.RepoError(
831 _("abandoned transaction found - run hg recover"))
831 _("abandoned transaction found - run hg recover"))
832
832
833 def onclose():
833 def onclose():
834 self.store.write(tr)
834 self.store.write(tr)
835
835
836 self._writejournal(desc)
836 self._writejournal(desc)
837 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
837 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
838 rp = report and report or self.ui.warn
838 rp = report and report or self.ui.warn
839 tr = transaction.transaction(rp, self.sopener,
839 tr = transaction.transaction(rp, self.sopener,
840 "journal",
840 "journal",
841 aftertrans(renames),
841 aftertrans(renames),
842 self.store.createmode,
842 self.store.createmode,
843 onclose)
843 onclose)
844 self._transref = weakref.ref(tr)
844 self._transref = weakref.ref(tr)
845 return tr
845 return tr
846
846
847 def _journalfiles(self):
847 def _journalfiles(self):
848 return ((self.svfs, 'journal'),
848 return ((self.svfs, 'journal'),
849 (self.vfs, 'journal.dirstate'),
849 (self.vfs, 'journal.dirstate'),
850 (self.vfs, 'journal.branch'),
850 (self.vfs, 'journal.branch'),
851 (self.vfs, 'journal.desc'),
851 (self.vfs, 'journal.desc'),
852 (self.vfs, 'journal.bookmarks'),
852 (self.vfs, 'journal.bookmarks'),
853 (self.svfs, 'journal.phaseroots'))
853 (self.svfs, 'journal.phaseroots'))
854
854
855 def undofiles(self):
855 def undofiles(self):
856 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
856 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
857
857
858 def _writejournal(self, desc):
858 def _writejournal(self, desc):
859 self.opener.write("journal.dirstate",
859 self.opener.write("journal.dirstate",
860 self.opener.tryread("dirstate"))
860 self.opener.tryread("dirstate"))
861 self.opener.write("journal.branch",
861 self.opener.write("journal.branch",
862 encoding.fromlocal(self.dirstate.branch()))
862 encoding.fromlocal(self.dirstate.branch()))
863 self.opener.write("journal.desc",
863 self.opener.write("journal.desc",
864 "%d\n%s\n" % (len(self), desc))
864 "%d\n%s\n" % (len(self), desc))
865 self.opener.write("journal.bookmarks",
865 self.opener.write("journal.bookmarks",
866 self.opener.tryread("bookmarks"))
866 self.opener.tryread("bookmarks"))
867 self.sopener.write("journal.phaseroots",
867 self.sopener.write("journal.phaseroots",
868 self.sopener.tryread("phaseroots"))
868 self.sopener.tryread("phaseroots"))
869
869
870 def recover(self):
870 def recover(self):
871 lock = self.lock()
871 lock = self.lock()
872 try:
872 try:
873 if self.svfs.exists("journal"):
873 if self.svfs.exists("journal"):
874 self.ui.status(_("rolling back interrupted transaction\n"))
874 self.ui.status(_("rolling back interrupted transaction\n"))
875 transaction.rollback(self.sopener, "journal",
875 transaction.rollback(self.sopener, "journal",
876 self.ui.warn)
876 self.ui.warn)
877 self.invalidate()
877 self.invalidate()
878 return True
878 return True
879 else:
879 else:
880 self.ui.warn(_("no interrupted transaction available\n"))
880 self.ui.warn(_("no interrupted transaction available\n"))
881 return False
881 return False
882 finally:
882 finally:
883 lock.release()
883 lock.release()
884
884
885 def rollback(self, dryrun=False, force=False):
885 def rollback(self, dryrun=False, force=False):
886 wlock = lock = None
886 wlock = lock = None
887 try:
887 try:
888 wlock = self.wlock()
888 wlock = self.wlock()
889 lock = self.lock()
889 lock = self.lock()
890 if self.svfs.exists("undo"):
890 if self.svfs.exists("undo"):
891 return self._rollback(dryrun, force)
891 return self._rollback(dryrun, force)
892 else:
892 else:
893 self.ui.warn(_("no rollback information available\n"))
893 self.ui.warn(_("no rollback information available\n"))
894 return 1
894 return 1
895 finally:
895 finally:
896 release(lock, wlock)
896 release(lock, wlock)
897
897
898 @unfilteredmethod # Until we get smarter cache management
898 @unfilteredmethod # Until we get smarter cache management
899 def _rollback(self, dryrun, force):
899 def _rollback(self, dryrun, force):
900 ui = self.ui
900 ui = self.ui
901 try:
901 try:
902 args = self.opener.read('undo.desc').splitlines()
902 args = self.opener.read('undo.desc').splitlines()
903 (oldlen, desc, detail) = (int(args[0]), args[1], None)
903 (oldlen, desc, detail) = (int(args[0]), args[1], None)
904 if len(args) >= 3:
904 if len(args) >= 3:
905 detail = args[2]
905 detail = args[2]
906 oldtip = oldlen - 1
906 oldtip = oldlen - 1
907
907
908 if detail and ui.verbose:
908 if detail and ui.verbose:
909 msg = (_('repository tip rolled back to revision %s'
909 msg = (_('repository tip rolled back to revision %s'
910 ' (undo %s: %s)\n')
910 ' (undo %s: %s)\n')
911 % (oldtip, desc, detail))
911 % (oldtip, desc, detail))
912 else:
912 else:
913 msg = (_('repository tip rolled back to revision %s'
913 msg = (_('repository tip rolled back to revision %s'
914 ' (undo %s)\n')
914 ' (undo %s)\n')
915 % (oldtip, desc))
915 % (oldtip, desc))
916 except IOError:
916 except IOError:
917 msg = _('rolling back unknown transaction\n')
917 msg = _('rolling back unknown transaction\n')
918 desc = None
918 desc = None
919
919
920 if not force and self['.'] != self['tip'] and desc == 'commit':
920 if not force and self['.'] != self['tip'] and desc == 'commit':
921 raise util.Abort(
921 raise util.Abort(
922 _('rollback of last commit while not checked out '
922 _('rollback of last commit while not checked out '
923 'may lose data'), hint=_('use -f to force'))
923 'may lose data'), hint=_('use -f to force'))
924
924
925 ui.status(msg)
925 ui.status(msg)
926 if dryrun:
926 if dryrun:
927 return 0
927 return 0
928
928
929 parents = self.dirstate.parents()
929 parents = self.dirstate.parents()
930 self.destroying()
930 self.destroying()
931 transaction.rollback(self.sopener, 'undo', ui.warn)
931 transaction.rollback(self.sopener, 'undo', ui.warn)
932 if self.vfs.exists('undo.bookmarks'):
932 if self.vfs.exists('undo.bookmarks'):
933 self.vfs.rename('undo.bookmarks', 'bookmarks')
933 self.vfs.rename('undo.bookmarks', 'bookmarks')
934 if self.svfs.exists('undo.phaseroots'):
934 if self.svfs.exists('undo.phaseroots'):
935 self.svfs.rename('undo.phaseroots', 'phaseroots')
935 self.svfs.rename('undo.phaseroots', 'phaseroots')
936 self.invalidate()
936 self.invalidate()
937
937
938 parentgone = (parents[0] not in self.changelog.nodemap or
938 parentgone = (parents[0] not in self.changelog.nodemap or
939 parents[1] not in self.changelog.nodemap)
939 parents[1] not in self.changelog.nodemap)
940 if parentgone:
940 if parentgone:
941 self.vfs.rename('undo.dirstate', 'dirstate')
941 self.vfs.rename('undo.dirstate', 'dirstate')
942 try:
942 try:
943 branch = self.opener.read('undo.branch')
943 branch = self.opener.read('undo.branch')
944 self.dirstate.setbranch(encoding.tolocal(branch))
944 self.dirstate.setbranch(encoding.tolocal(branch))
945 except IOError:
945 except IOError:
946 ui.warn(_('named branch could not be reset: '
946 ui.warn(_('named branch could not be reset: '
947 'current branch is still \'%s\'\n')
947 'current branch is still \'%s\'\n')
948 % self.dirstate.branch())
948 % self.dirstate.branch())
949
949
950 self.dirstate.invalidate()
950 self.dirstate.invalidate()
951 parents = tuple([p.rev() for p in self.parents()])
951 parents = tuple([p.rev() for p in self.parents()])
952 if len(parents) > 1:
952 if len(parents) > 1:
953 ui.status(_('working directory now based on '
953 ui.status(_('working directory now based on '
954 'revisions %d and %d\n') % parents)
954 'revisions %d and %d\n') % parents)
955 else:
955 else:
956 ui.status(_('working directory now based on '
956 ui.status(_('working directory now based on '
957 'revision %d\n') % parents)
957 'revision %d\n') % parents)
958 # TODO: if we know which new heads may result from this rollback, pass
958 # TODO: if we know which new heads may result from this rollback, pass
959 # them to destroy(), which will prevent the branchhead cache from being
959 # them to destroy(), which will prevent the branchhead cache from being
960 # invalidated.
960 # invalidated.
961 self.destroyed()
961 self.destroyed()
962 return 0
962 return 0
963
963
964 def invalidatecaches(self):
964 def invalidatecaches(self):
965
965
966 if '_tagscache' in vars(self):
966 if '_tagscache' in vars(self):
967 # can't use delattr on proxy
967 # can't use delattr on proxy
968 del self.__dict__['_tagscache']
968 del self.__dict__['_tagscache']
969
969
970 self.unfiltered()._branchcaches.clear()
970 self.unfiltered()._branchcaches.clear()
971 self.invalidatevolatilesets()
971 self.invalidatevolatilesets()
972
972
973 def invalidatevolatilesets(self):
973 def invalidatevolatilesets(self):
974 self.filteredrevcache.clear()
974 self.filteredrevcache.clear()
975 obsolete.clearobscaches(self)
975 obsolete.clearobscaches(self)
976
976
977 def invalidatedirstate(self):
977 def invalidatedirstate(self):
978 '''Invalidates the dirstate, causing the next call to dirstate
978 '''Invalidates the dirstate, causing the next call to dirstate
979 to check if it was modified since the last time it was read,
979 to check if it was modified since the last time it was read,
980 rereading it if it has.
980 rereading it if it has.
981
981
982 This is different to dirstate.invalidate() that it doesn't always
982 This is different to dirstate.invalidate() that it doesn't always
983 rereads the dirstate. Use dirstate.invalidate() if you want to
983 rereads the dirstate. Use dirstate.invalidate() if you want to
984 explicitly read the dirstate again (i.e. restoring it to a previous
984 explicitly read the dirstate again (i.e. restoring it to a previous
985 known good state).'''
985 known good state).'''
986 if hasunfilteredcache(self, 'dirstate'):
986 if hasunfilteredcache(self, 'dirstate'):
987 for k in self.dirstate._filecache:
987 for k in self.dirstate._filecache:
988 try:
988 try:
989 delattr(self.dirstate, k)
989 delattr(self.dirstate, k)
990 except AttributeError:
990 except AttributeError:
991 pass
991 pass
992 delattr(self.unfiltered(), 'dirstate')
992 delattr(self.unfiltered(), 'dirstate')
993
993
994 def invalidate(self):
994 def invalidate(self):
995 unfiltered = self.unfiltered() # all file caches are stored unfiltered
995 unfiltered = self.unfiltered() # all file caches are stored unfiltered
996 for k in self._filecache:
996 for k in self._filecache:
997 # dirstate is invalidated separately in invalidatedirstate()
997 # dirstate is invalidated separately in invalidatedirstate()
998 if k == 'dirstate':
998 if k == 'dirstate':
999 continue
999 continue
1000
1000
1001 try:
1001 try:
1002 delattr(unfiltered, k)
1002 delattr(unfiltered, k)
1003 except AttributeError:
1003 except AttributeError:
1004 pass
1004 pass
1005 self.invalidatecaches()
1005 self.invalidatecaches()
1006 self.store.invalidatecaches()
1006 self.store.invalidatecaches()
1007
1007
1008 def invalidateall(self):
1008 def invalidateall(self):
1009 '''Fully invalidates both store and non-store parts, causing the
1009 '''Fully invalidates both store and non-store parts, causing the
1010 subsequent operation to reread any outside changes.'''
1010 subsequent operation to reread any outside changes.'''
1011 # extension should hook this to invalidate its caches
1011 # extension should hook this to invalidate its caches
1012 self.invalidate()
1012 self.invalidate()
1013 self.invalidatedirstate()
1013 self.invalidatedirstate()
1014
1014
1015 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1015 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1016 try:
1016 try:
1017 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1017 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1018 except error.LockHeld, inst:
1018 except error.LockHeld, inst:
1019 if not wait:
1019 if not wait:
1020 raise
1020 raise
1021 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1021 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1022 (desc, inst.locker))
1022 (desc, inst.locker))
1023 # default to 600 seconds timeout
1023 # default to 600 seconds timeout
1024 l = lockmod.lock(vfs, lockname,
1024 l = lockmod.lock(vfs, lockname,
1025 int(self.ui.config("ui", "timeout", "600")),
1025 int(self.ui.config("ui", "timeout", "600")),
1026 releasefn, desc=desc)
1026 releasefn, desc=desc)
1027 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1027 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1028 if acquirefn:
1028 if acquirefn:
1029 acquirefn()
1029 acquirefn()
1030 return l
1030 return l
1031
1031
1032 def _afterlock(self, callback):
1032 def _afterlock(self, callback):
1033 """add a callback to the current repository lock.
1033 """add a callback to the current repository lock.
1034
1034
1035 The callback will be executed on lock release."""
1035 The callback will be executed on lock release."""
1036 l = self._lockref and self._lockref()
1036 l = self._lockref and self._lockref()
1037 if l:
1037 if l:
1038 l.postrelease.append(callback)
1038 l.postrelease.append(callback)
1039 else:
1039 else:
1040 callback()
1040 callback()
1041
1041
1042 def lock(self, wait=True):
1042 def lock(self, wait=True):
1043 '''Lock the repository store (.hg/store) and return a weak reference
1043 '''Lock the repository store (.hg/store) and return a weak reference
1044 to the lock. Use this before modifying the store (e.g. committing or
1044 to the lock. Use this before modifying the store (e.g. committing or
1045 stripping). If you are opening a transaction, get a lock as well.)'''
1045 stripping). If you are opening a transaction, get a lock as well.)'''
1046 l = self._lockref and self._lockref()
1046 l = self._lockref and self._lockref()
1047 if l is not None and l.held:
1047 if l is not None and l.held:
1048 l.lock()
1048 l.lock()
1049 return l
1049 return l
1050
1050
1051 def unlock():
1051 def unlock():
1052 if hasunfilteredcache(self, '_phasecache'):
1052 if hasunfilteredcache(self, '_phasecache'):
1053 self._phasecache.write()
1053 self._phasecache.write()
1054 for k, ce in self._filecache.items():
1054 for k, ce in self._filecache.items():
1055 if k == 'dirstate' or k not in self.__dict__:
1055 if k == 'dirstate' or k not in self.__dict__:
1056 continue
1056 continue
1057 ce.refresh()
1057 ce.refresh()
1058
1058
1059 l = self._lock(self.svfs, "lock", wait, unlock,
1059 l = self._lock(self.svfs, "lock", wait, unlock,
1060 self.invalidate, _('repository %s') % self.origroot)
1060 self.invalidate, _('repository %s') % self.origroot)
1061 self._lockref = weakref.ref(l)
1061 self._lockref = weakref.ref(l)
1062 return l
1062 return l
1063
1063
1064 def wlock(self, wait=True):
1064 def wlock(self, wait=True):
1065 '''Lock the non-store parts of the repository (everything under
1065 '''Lock the non-store parts of the repository (everything under
1066 .hg except .hg/store) and return a weak reference to the lock.
1066 .hg except .hg/store) and return a weak reference to the lock.
1067 Use this before modifying files in .hg.'''
1067 Use this before modifying files in .hg.'''
1068 l = self._wlockref and self._wlockref()
1068 l = self._wlockref and self._wlockref()
1069 if l is not None and l.held:
1069 if l is not None and l.held:
1070 l.lock()
1070 l.lock()
1071 return l
1071 return l
1072
1072
1073 def unlock():
1073 def unlock():
1074 self.dirstate.write()
1074 self.dirstate.write()
1075 self._filecache['dirstate'].refresh()
1075 self._filecache['dirstate'].refresh()
1076
1076
1077 l = self._lock(self.vfs, "wlock", wait, unlock,
1077 l = self._lock(self.vfs, "wlock", wait, unlock,
1078 self.invalidatedirstate, _('working directory of %s') %
1078 self.invalidatedirstate, _('working directory of %s') %
1079 self.origroot)
1079 self.origroot)
1080 self._wlockref = weakref.ref(l)
1080 self._wlockref = weakref.ref(l)
1081 return l
1081 return l
1082
1082
1083 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1083 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1084 """
1084 """
1085 commit an individual file as part of a larger transaction
1085 commit an individual file as part of a larger transaction
1086 """
1086 """
1087
1087
1088 fname = fctx.path()
1088 fname = fctx.path()
1089 text = fctx.data()
1089 text = fctx.data()
1090 flog = self.file(fname)
1090 flog = self.file(fname)
1091 fparent1 = manifest1.get(fname, nullid)
1091 fparent1 = manifest1.get(fname, nullid)
1092 fparent2 = fparent2o = manifest2.get(fname, nullid)
1092 fparent2 = fparent2o = manifest2.get(fname, nullid)
1093
1093
1094 meta = {}
1094 meta = {}
1095 copy = fctx.renamed()
1095 copy = fctx.renamed()
1096 if copy and copy[0] != fname:
1096 if copy and copy[0] != fname:
1097 # Mark the new revision of this file as a copy of another
1097 # Mark the new revision of this file as a copy of another
1098 # file. This copy data will effectively act as a parent
1098 # file. This copy data will effectively act as a parent
1099 # of this new revision. If this is a merge, the first
1099 # of this new revision. If this is a merge, the first
1100 # parent will be the nullid (meaning "look up the copy data")
1100 # parent will be the nullid (meaning "look up the copy data")
1101 # and the second one will be the other parent. For example:
1101 # and the second one will be the other parent. For example:
1102 #
1102 #
1103 # 0 --- 1 --- 3 rev1 changes file foo
1103 # 0 --- 1 --- 3 rev1 changes file foo
1104 # \ / rev2 renames foo to bar and changes it
1104 # \ / rev2 renames foo to bar and changes it
1105 # \- 2 -/ rev3 should have bar with all changes and
1105 # \- 2 -/ rev3 should have bar with all changes and
1106 # should record that bar descends from
1106 # should record that bar descends from
1107 # bar in rev2 and foo in rev1
1107 # bar in rev2 and foo in rev1
1108 #
1108 #
1109 # this allows this merge to succeed:
1109 # this allows this merge to succeed:
1110 #
1110 #
1111 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1111 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1112 # \ / merging rev3 and rev4 should use bar@rev2
1112 # \ / merging rev3 and rev4 should use bar@rev2
1113 # \- 2 --- 4 as the merge base
1113 # \- 2 --- 4 as the merge base
1114 #
1114 #
1115
1115
1116 cfname = copy[0]
1116 cfname = copy[0]
1117 crev = manifest1.get(cfname)
1117 crev = manifest1.get(cfname)
1118 newfparent = fparent2
1118 newfparent = fparent2
1119
1119
1120 if manifest2: # branch merge
1120 if manifest2: # branch merge
1121 if fparent2 == nullid or crev is None: # copied on remote side
1121 if fparent2 == nullid or crev is None: # copied on remote side
1122 if cfname in manifest2:
1122 if cfname in manifest2:
1123 crev = manifest2[cfname]
1123 crev = manifest2[cfname]
1124 newfparent = fparent1
1124 newfparent = fparent1
1125
1125
1126 # find source in nearest ancestor if we've lost track
1126 # find source in nearest ancestor if we've lost track
1127 if not crev:
1127 if not crev:
1128 self.ui.debug(" %s: searching for copy revision for %s\n" %
1128 self.ui.debug(" %s: searching for copy revision for %s\n" %
1129 (fname, cfname))
1129 (fname, cfname))
1130 for ancestor in self[None].ancestors():
1130 for ancestor in self[None].ancestors():
1131 if cfname in ancestor:
1131 if cfname in ancestor:
1132 crev = ancestor[cfname].filenode()
1132 crev = ancestor[cfname].filenode()
1133 break
1133 break
1134
1134
1135 if crev:
1135 if crev:
1136 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1136 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1137 meta["copy"] = cfname
1137 meta["copy"] = cfname
1138 meta["copyrev"] = hex(crev)
1138 meta["copyrev"] = hex(crev)
1139 fparent1, fparent2 = nullid, newfparent
1139 fparent1, fparent2 = nullid, newfparent
1140 else:
1140 else:
1141 self.ui.warn(_("warning: can't find ancestor for '%s' "
1141 self.ui.warn(_("warning: can't find ancestor for '%s' "
1142 "copied from '%s'!\n") % (fname, cfname))
1142 "copied from '%s'!\n") % (fname, cfname))
1143
1143
1144 elif fparent1 == nullid:
1144 elif fparent1 == nullid:
1145 fparent1, fparent2 = fparent2, nullid
1145 fparent1, fparent2 = fparent2, nullid
1146 elif fparent2 != nullid:
1146 elif fparent2 != nullid:
1147 # is one parent an ancestor of the other?
1147 # is one parent an ancestor of the other?
1148 fparentancestor = flog.ancestor(fparent1, fparent2)
1148 fparentancestor = flog.ancestor(fparent1, fparent2)
1149 if fparentancestor == fparent1:
1149 if fparentancestor == fparent1:
1150 fparent1, fparent2 = fparent2, nullid
1150 fparent1, fparent2 = fparent2, nullid
1151 elif fparentancestor == fparent2:
1151 elif fparentancestor == fparent2:
1152 fparent2 = nullid
1152 fparent2 = nullid
1153
1153
1154 # is the file changed?
1154 # is the file changed?
1155 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1155 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1156 changelist.append(fname)
1156 changelist.append(fname)
1157 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1157 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1158
1158
1159 # are just the flags changed during merge?
1159 # are just the flags changed during merge?
1160 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1160 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1161 changelist.append(fname)
1161 changelist.append(fname)
1162
1162
1163 return fparent1
1163 return fparent1
1164
1164
1165 @unfilteredmethod
1165 @unfilteredmethod
1166 def commit(self, text="", user=None, date=None, match=None, force=False,
1166 def commit(self, text="", user=None, date=None, match=None, force=False,
1167 editor=False, extra={}):
1167 editor=False, extra={}):
1168 """Add a new revision to current repository.
1168 """Add a new revision to current repository.
1169
1169
1170 Revision information is gathered from the working directory,
1170 Revision information is gathered from the working directory,
1171 match can be used to filter the committed files. If editor is
1171 match can be used to filter the committed files. If editor is
1172 supplied, it is called to get a commit message.
1172 supplied, it is called to get a commit message.
1173 """
1173 """
1174
1174
1175 def fail(f, msg):
1175 def fail(f, msg):
1176 raise util.Abort('%s: %s' % (f, msg))
1176 raise util.Abort('%s: %s' % (f, msg))
1177
1177
1178 if not match:
1178 if not match:
1179 match = matchmod.always(self.root, '')
1179 match = matchmod.always(self.root, '')
1180
1180
1181 if not force:
1181 if not force:
1182 vdirs = []
1182 vdirs = []
1183 match.explicitdir = vdirs.append
1183 match.explicitdir = vdirs.append
1184 match.bad = fail
1184 match.bad = fail
1185
1185
1186 wlock = self.wlock()
1186 wlock = self.wlock()
1187 try:
1187 try:
1188 wctx = self[None]
1188 wctx = self[None]
1189 merge = len(wctx.parents()) > 1
1189 merge = len(wctx.parents()) > 1
1190
1190
1191 if (not force and merge and match and
1191 if (not force and merge and match and
1192 (match.files() or match.anypats())):
1192 (match.files() or match.anypats())):
1193 raise util.Abort(_('cannot partially commit a merge '
1193 raise util.Abort(_('cannot partially commit a merge '
1194 '(do not specify files or patterns)'))
1194 '(do not specify files or patterns)'))
1195
1195
1196 changes = self.status(match=match, clean=force)
1196 changes = self.status(match=match, clean=force)
1197 if force:
1197 if force:
1198 changes[0].extend(changes[6]) # mq may commit unchanged files
1198 changes[0].extend(changes[6]) # mq may commit unchanged files
1199
1199
1200 # check subrepos
1200 # check subrepos
1201 subs = []
1201 subs = []
1202 commitsubs = set()
1202 commitsubs = set()
1203 newstate = wctx.substate.copy()
1203 newstate = wctx.substate.copy()
1204 # only manage subrepos and .hgsubstate if .hgsub is present
1204 # only manage subrepos and .hgsubstate if .hgsub is present
1205 if '.hgsub' in wctx:
1205 if '.hgsub' in wctx:
1206 # we'll decide whether to track this ourselves, thanks
1206 # we'll decide whether to track this ourselves, thanks
1207 for c in changes[:3]:
1207 for c in changes[:3]:
1208 if '.hgsubstate' in c:
1208 if '.hgsubstate' in c:
1209 c.remove('.hgsubstate')
1209 c.remove('.hgsubstate')
1210
1210
1211 # compare current state to last committed state
1211 # compare current state to last committed state
1212 # build new substate based on last committed state
1212 # build new substate based on last committed state
1213 oldstate = wctx.p1().substate
1213 oldstate = wctx.p1().substate
1214 for s in sorted(newstate.keys()):
1214 for s in sorted(newstate.keys()):
1215 if not match(s):
1215 if not match(s):
1216 # ignore working copy, use old state if present
1216 # ignore working copy, use old state if present
1217 if s in oldstate:
1217 if s in oldstate:
1218 newstate[s] = oldstate[s]
1218 newstate[s] = oldstate[s]
1219 continue
1219 continue
1220 if not force:
1220 if not force:
1221 raise util.Abort(
1221 raise util.Abort(
1222 _("commit with new subrepo %s excluded") % s)
1222 _("commit with new subrepo %s excluded") % s)
1223 if wctx.sub(s).dirty(True):
1223 if wctx.sub(s).dirty(True):
1224 if not self.ui.configbool('ui', 'commitsubrepos'):
1224 if not self.ui.configbool('ui', 'commitsubrepos'):
1225 raise util.Abort(
1225 raise util.Abort(
1226 _("uncommitted changes in subrepo %s") % s,
1226 _("uncommitted changes in subrepo %s") % s,
1227 hint=_("use --subrepos for recursive commit"))
1227 hint=_("use --subrepos for recursive commit"))
1228 subs.append(s)
1228 subs.append(s)
1229 commitsubs.add(s)
1229 commitsubs.add(s)
1230 else:
1230 else:
1231 bs = wctx.sub(s).basestate()
1231 bs = wctx.sub(s).basestate()
1232 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1232 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1233 if oldstate.get(s, (None, None, None))[1] != bs:
1233 if oldstate.get(s, (None, None, None))[1] != bs:
1234 subs.append(s)
1234 subs.append(s)
1235
1235
1236 # check for removed subrepos
1236 # check for removed subrepos
1237 for p in wctx.parents():
1237 for p in wctx.parents():
1238 r = [s for s in p.substate if s not in newstate]
1238 r = [s for s in p.substate if s not in newstate]
1239 subs += [s for s in r if match(s)]
1239 subs += [s for s in r if match(s)]
1240 if subs:
1240 if subs:
1241 if (not match('.hgsub') and
1241 if (not match('.hgsub') and
1242 '.hgsub' in (wctx.modified() + wctx.added())):
1242 '.hgsub' in (wctx.modified() + wctx.added())):
1243 raise util.Abort(
1243 raise util.Abort(
1244 _("can't commit subrepos without .hgsub"))
1244 _("can't commit subrepos without .hgsub"))
1245 changes[0].insert(0, '.hgsubstate')
1245 changes[0].insert(0, '.hgsubstate')
1246
1246
1247 elif '.hgsub' in changes[2]:
1247 elif '.hgsub' in changes[2]:
1248 # clean up .hgsubstate when .hgsub is removed
1248 # clean up .hgsubstate when .hgsub is removed
1249 if ('.hgsubstate' in wctx and
1249 if ('.hgsubstate' in wctx and
1250 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1250 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1251 changes[2].insert(0, '.hgsubstate')
1251 changes[2].insert(0, '.hgsubstate')
1252
1252
1253 # make sure all explicit patterns are matched
1253 # make sure all explicit patterns are matched
1254 if not force and match.files():
1254 if not force and match.files():
1255 matched = set(changes[0] + changes[1] + changes[2])
1255 matched = set(changes[0] + changes[1] + changes[2])
1256
1256
1257 for f in match.files():
1257 for f in match.files():
1258 f = self.dirstate.normalize(f)
1258 f = self.dirstate.normalize(f)
1259 if f == '.' or f in matched or f in wctx.substate:
1259 if f == '.' or f in matched or f in wctx.substate:
1260 continue
1260 continue
1261 if f in changes[3]: # missing
1261 if f in changes[3]: # missing
1262 fail(f, _('file not found!'))
1262 fail(f, _('file not found!'))
1263 if f in vdirs: # visited directory
1263 if f in vdirs: # visited directory
1264 d = f + '/'
1264 d = f + '/'
1265 for mf in matched:
1265 for mf in matched:
1266 if mf.startswith(d):
1266 if mf.startswith(d):
1267 break
1267 break
1268 else:
1268 else:
1269 fail(f, _("no match under directory!"))
1269 fail(f, _("no match under directory!"))
1270 elif f not in self.dirstate:
1270 elif f not in self.dirstate:
1271 fail(f, _("file not tracked!"))
1271 fail(f, _("file not tracked!"))
1272
1272
1273 cctx = context.workingctx(self, text, user, date, extra, changes)
1273 cctx = context.workingctx(self, text, user, date, extra, changes)
1274
1274
1275 if (not force and not extra.get("close") and not merge
1275 if (not force and not extra.get("close") and not merge
1276 and not cctx.files()
1276 and not cctx.files()
1277 and wctx.branch() == wctx.p1().branch()):
1277 and wctx.branch() == wctx.p1().branch()):
1278 return None
1278 return None
1279
1279
1280 if merge and cctx.deleted():
1280 if merge and cctx.deleted():
1281 raise util.Abort(_("cannot commit merge with missing files"))
1281 raise util.Abort(_("cannot commit merge with missing files"))
1282
1282
1283 ms = mergemod.mergestate(self)
1283 ms = mergemod.mergestate(self)
1284 for f in changes[0]:
1284 for f in changes[0]:
1285 if f in ms and ms[f] == 'u':
1285 if f in ms and ms[f] == 'u':
1286 raise util.Abort(_("unresolved merge conflicts "
1286 raise util.Abort(_("unresolved merge conflicts "
1287 "(see hg help resolve)"))
1287 "(see hg help resolve)"))
1288
1288
1289 if editor:
1289 if editor:
1290 cctx._text = editor(self, cctx, subs)
1290 cctx._text = editor(self, cctx, subs)
1291 edited = (text != cctx._text)
1291 edited = (text != cctx._text)
1292
1292
1293 # Save commit message in case this transaction gets rolled back
1293 # Save commit message in case this transaction gets rolled back
1294 # (e.g. by a pretxncommit hook). Leave the content alone on
1294 # (e.g. by a pretxncommit hook). Leave the content alone on
1295 # the assumption that the user will use the same editor again.
1295 # the assumption that the user will use the same editor again.
1296 msgfn = self.savecommitmessage(cctx._text)
1296 msgfn = self.savecommitmessage(cctx._text)
1297
1297
1298 # commit subs and write new state
1298 # commit subs and write new state
1299 if subs:
1299 if subs:
1300 for s in sorted(commitsubs):
1300 for s in sorted(commitsubs):
1301 sub = wctx.sub(s)
1301 sub = wctx.sub(s)
1302 self.ui.status(_('committing subrepository %s\n') %
1302 self.ui.status(_('committing subrepository %s\n') %
1303 subrepo.subrelpath(sub))
1303 subrepo.subrelpath(sub))
1304 sr = sub.commit(cctx._text, user, date)
1304 sr = sub.commit(cctx._text, user, date)
1305 newstate[s] = (newstate[s][0], sr)
1305 newstate[s] = (newstate[s][0], sr)
1306 subrepo.writestate(self, newstate)
1306 subrepo.writestate(self, newstate)
1307
1307
1308 p1, p2 = self.dirstate.parents()
1308 p1, p2 = self.dirstate.parents()
1309 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1309 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1310 try:
1310 try:
1311 self.hook("precommit", throw=True, parent1=hookp1,
1311 self.hook("precommit", throw=True, parent1=hookp1,
1312 parent2=hookp2)
1312 parent2=hookp2)
1313 ret = self.commitctx(cctx, True)
1313 ret = self.commitctx(cctx, True)
1314 except: # re-raises
1314 except: # re-raises
1315 if edited:
1315 if edited:
1316 self.ui.write(
1316 self.ui.write(
1317 _('note: commit message saved in %s\n') % msgfn)
1317 _('note: commit message saved in %s\n') % msgfn)
1318 raise
1318 raise
1319
1319
1320 # update bookmarks, dirstate and mergestate
1320 # update bookmarks, dirstate and mergestate
1321 bookmarks.update(self, [p1, p2], ret)
1321 bookmarks.update(self, [p1, p2], ret)
1322 cctx.markcommitted(ret)
1322 cctx.markcommitted(ret)
1323 ms.reset()
1323 ms.reset()
1324 finally:
1324 finally:
1325 wlock.release()
1325 wlock.release()
1326
1326
1327 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1327 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1328 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1328 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1329 self._afterlock(commithook)
1329 self._afterlock(commithook)
1330 return ret
1330 return ret
1331
1331
1332 @unfilteredmethod
1332 @unfilteredmethod
1333 def commitctx(self, ctx, error=False):
1333 def commitctx(self, ctx, error=False):
1334 """Add a new revision to current repository.
1334 """Add a new revision to current repository.
1335 Revision information is passed via the context argument.
1335 Revision information is passed via the context argument.
1336 """
1336 """
1337
1337
1338 tr = lock = None
1338 tr = lock = None
1339 removed = list(ctx.removed())
1339 removed = list(ctx.removed())
1340 p1, p2 = ctx.p1(), ctx.p2()
1340 p1, p2 = ctx.p1(), ctx.p2()
1341 user = ctx.user()
1341 user = ctx.user()
1342
1342
1343 lock = self.lock()
1343 lock = self.lock()
1344 try:
1344 try:
1345 tr = self.transaction("commit")
1345 tr = self.transaction("commit")
1346 trp = weakref.proxy(tr)
1346 trp = weakref.proxy(tr)
1347
1347
1348 if ctx.files():
1348 if ctx.files():
1349 m1 = p1.manifest().copy()
1349 m1 = p1.manifest().copy()
1350 m2 = p2.manifest()
1350 m2 = p2.manifest()
1351
1351
1352 # check in files
1352 # check in files
1353 new = {}
1353 new = {}
1354 changed = []
1354 changed = []
1355 linkrev = len(self)
1355 linkrev = len(self)
1356 for f in sorted(ctx.modified() + ctx.added()):
1356 for f in sorted(ctx.modified() + ctx.added()):
1357 self.ui.note(f + "\n")
1357 self.ui.note(f + "\n")
1358 try:
1358 try:
1359 fctx = ctx[f]
1359 fctx = ctx[f]
1360 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1360 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1361 changed)
1361 changed)
1362 m1.set(f, fctx.flags())
1362 m1.set(f, fctx.flags())
1363 except OSError, inst:
1363 except OSError, inst:
1364 self.ui.warn(_("trouble committing %s!\n") % f)
1364 self.ui.warn(_("trouble committing %s!\n") % f)
1365 raise
1365 raise
1366 except IOError, inst:
1366 except IOError, inst:
1367 errcode = getattr(inst, 'errno', errno.ENOENT)
1367 errcode = getattr(inst, 'errno', errno.ENOENT)
1368 if error or errcode and errcode != errno.ENOENT:
1368 if error or errcode and errcode != errno.ENOENT:
1369 self.ui.warn(_("trouble committing %s!\n") % f)
1369 self.ui.warn(_("trouble committing %s!\n") % f)
1370 raise
1370 raise
1371 else:
1371 else:
1372 removed.append(f)
1372 removed.append(f)
1373
1373
1374 # update manifest
1374 # update manifest
1375 m1.update(new)
1375 m1.update(new)
1376 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1376 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1377 drop = [f for f in removed if f in m1]
1377 drop = [f for f in removed if f in m1]
1378 for f in drop:
1378 for f in drop:
1379 del m1[f]
1379 del m1[f]
1380 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1380 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1381 p2.manifestnode(), (new, drop))
1381 p2.manifestnode(), (new, drop))
1382 files = changed + removed
1382 files = changed + removed
1383 else:
1383 else:
1384 mn = p1.manifestnode()
1384 mn = p1.manifestnode()
1385 files = []
1385 files = []
1386
1386
1387 # update changelog
1387 # update changelog
1388 self.changelog.delayupdate()
1388 self.changelog.delayupdate()
1389 n = self.changelog.add(mn, files, ctx.description(),
1389 n = self.changelog.add(mn, files, ctx.description(),
1390 trp, p1.node(), p2.node(),
1390 trp, p1.node(), p2.node(),
1391 user, ctx.date(), ctx.extra().copy())
1391 user, ctx.date(), ctx.extra().copy())
1392 p = lambda: self.changelog.writepending() and self.root or ""
1392 p = lambda: self.changelog.writepending() and self.root or ""
1393 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1393 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1394 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1394 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1395 parent2=xp2, pending=p)
1395 parent2=xp2, pending=p)
1396 self.changelog.finalize(trp)
1396 self.changelog.finalize(trp)
1397 # set the new commit is proper phase
1397 # set the new commit is proper phase
1398 targetphase = subrepo.newcommitphase(self.ui, ctx)
1398 targetphase = subrepo.newcommitphase(self.ui, ctx)
1399 if targetphase:
1399 if targetphase:
1400 # retract boundary do not alter parent changeset.
1400 # retract boundary do not alter parent changeset.
1401 # if a parent have higher the resulting phase will
1401 # if a parent have higher the resulting phase will
1402 # be compliant anyway
1402 # be compliant anyway
1403 #
1403 #
1404 # if minimal phase was 0 we don't need to retract anything
1404 # if minimal phase was 0 we don't need to retract anything
1405 phases.retractboundary(self, targetphase, [n])
1405 phases.retractboundary(self, targetphase, [n])
1406 tr.close()
1406 tr.close()
1407 branchmap.updatecache(self.filtered('served'))
1407 branchmap.updatecache(self.filtered('served'))
1408 return n
1408 return n
1409 finally:
1409 finally:
1410 if tr:
1410 if tr:
1411 tr.release()
1411 tr.release()
1412 lock.release()
1412 lock.release()
1413
1413
1414 @unfilteredmethod
1414 @unfilteredmethod
1415 def destroying(self):
1415 def destroying(self):
1416 '''Inform the repository that nodes are about to be destroyed.
1416 '''Inform the repository that nodes are about to be destroyed.
1417 Intended for use by strip and rollback, so there's a common
1417 Intended for use by strip and rollback, so there's a common
1418 place for anything that has to be done before destroying history.
1418 place for anything that has to be done before destroying history.
1419
1419
1420 This is mostly useful for saving state that is in memory and waiting
1420 This is mostly useful for saving state that is in memory and waiting
1421 to be flushed when the current lock is released. Because a call to
1421 to be flushed when the current lock is released. Because a call to
1422 destroyed is imminent, the repo will be invalidated causing those
1422 destroyed is imminent, the repo will be invalidated causing those
1423 changes to stay in memory (waiting for the next unlock), or vanish
1423 changes to stay in memory (waiting for the next unlock), or vanish
1424 completely.
1424 completely.
1425 '''
1425 '''
1426 # When using the same lock to commit and strip, the phasecache is left
1426 # When using the same lock to commit and strip, the phasecache is left
1427 # dirty after committing. Then when we strip, the repo is invalidated,
1427 # dirty after committing. Then when we strip, the repo is invalidated,
1428 # causing those changes to disappear.
1428 # causing those changes to disappear.
1429 if '_phasecache' in vars(self):
1429 if '_phasecache' in vars(self):
1430 self._phasecache.write()
1430 self._phasecache.write()
1431
1431
1432 @unfilteredmethod
1432 @unfilteredmethod
1433 def destroyed(self):
1433 def destroyed(self):
1434 '''Inform the repository that nodes have been destroyed.
1434 '''Inform the repository that nodes have been destroyed.
1435 Intended for use by strip and rollback, so there's a common
1435 Intended for use by strip and rollback, so there's a common
1436 place for anything that has to be done after destroying history.
1436 place for anything that has to be done after destroying history.
1437 '''
1437 '''
1438 # When one tries to:
1438 # When one tries to:
1439 # 1) destroy nodes thus calling this method (e.g. strip)
1439 # 1) destroy nodes thus calling this method (e.g. strip)
1440 # 2) use phasecache somewhere (e.g. commit)
1440 # 2) use phasecache somewhere (e.g. commit)
1441 #
1441 #
1442 # then 2) will fail because the phasecache contains nodes that were
1442 # then 2) will fail because the phasecache contains nodes that were
1443 # removed. We can either remove phasecache from the filecache,
1443 # removed. We can either remove phasecache from the filecache,
1444 # causing it to reload next time it is accessed, or simply filter
1444 # causing it to reload next time it is accessed, or simply filter
1445 # the removed nodes now and write the updated cache.
1445 # the removed nodes now and write the updated cache.
1446 self._phasecache.filterunknown(self)
1446 self._phasecache.filterunknown(self)
1447 self._phasecache.write()
1447 self._phasecache.write()
1448
1448
1449 # update the 'served' branch cache to help read only server process
1449 # update the 'served' branch cache to help read only server process
1450 # Thanks to branchcache collaboration this is done from the nearest
1450 # Thanks to branchcache collaboration this is done from the nearest
1451 # filtered subset and it is expected to be fast.
1451 # filtered subset and it is expected to be fast.
1452 branchmap.updatecache(self.filtered('served'))
1452 branchmap.updatecache(self.filtered('served'))
1453
1453
1454 # Ensure the persistent tag cache is updated. Doing it now
1454 # Ensure the persistent tag cache is updated. Doing it now
1455 # means that the tag cache only has to worry about destroyed
1455 # means that the tag cache only has to worry about destroyed
1456 # heads immediately after a strip/rollback. That in turn
1456 # heads immediately after a strip/rollback. That in turn
1457 # guarantees that "cachetip == currenttip" (comparing both rev
1457 # guarantees that "cachetip == currenttip" (comparing both rev
1458 # and node) always means no nodes have been added or destroyed.
1458 # and node) always means no nodes have been added or destroyed.
1459
1459
1460 # XXX this is suboptimal when qrefresh'ing: we strip the current
1460 # XXX this is suboptimal when qrefresh'ing: we strip the current
1461 # head, refresh the tag cache, then immediately add a new head.
1461 # head, refresh the tag cache, then immediately add a new head.
1462 # But I think doing it this way is necessary for the "instant
1462 # But I think doing it this way is necessary for the "instant
1463 # tag cache retrieval" case to work.
1463 # tag cache retrieval" case to work.
1464 self.invalidate()
1464 self.invalidate()
1465
1465
1466 def walk(self, match, node=None):
1466 def walk(self, match, node=None):
1467 '''
1467 '''
1468 walk recursively through the directory tree or a given
1468 walk recursively through the directory tree or a given
1469 changeset, finding all files matched by the match
1469 changeset, finding all files matched by the match
1470 function
1470 function
1471 '''
1471 '''
1472 return self[node].walk(match)
1472 return self[node].walk(match)
1473
1473
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file-name lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The unknown, ignored and clean lists are only populated when the
        corresponding keyword argument is true; otherwise they are empty.
        """

        def mfmatches(ctx):
            # Return a copy of ctx's manifest restricted to the files
            # selected by 'match' (full copy when the matcher matches all).
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        # working: ctx2 is the working directory (has no revision number)
        working = ctx2.rev() is None
        # parentworking: comparing the working dir against its first parent,
        # which lets us trust the dirstate's own status computation below
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # 'cmp' holds files the dirstate could not classify by stat data
            # alone; they need a full content comparison below
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        # best effort only: another process holds the lock
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    # None node id forces a content comparison below
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # reclassify every file by diffing the two manifests
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # remove handled entries; whatever survives in mf1
                    # exists only on the first side, i.e. was removed
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # merge in the status of every subrepository, prefixing each
            # reported file with its subrepo path
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1620
1620
1621 def heads(self, start=None):
1621 def heads(self, start=None):
1622 heads = self.changelog.heads(start)
1622 heads = self.changelog.heads(start)
1623 # sort the output in rev descending order
1623 # sort the output in rev descending order
1624 return sorted(heads, key=self.changelog.rev, reverse=True)
1624 return sorted(heads, key=self.changelog.rev, reverse=True)
1625
1625
1626 def branchheads(self, branch=None, start=None, closed=False):
1626 def branchheads(self, branch=None, start=None, closed=False):
1627 '''return a (possibly filtered) list of heads for the given branch
1627 '''return a (possibly filtered) list of heads for the given branch
1628
1628
1629 Heads are returned in topological order, from newest to oldest.
1629 Heads are returned in topological order, from newest to oldest.
1630 If branch is None, use the dirstate branch.
1630 If branch is None, use the dirstate branch.
1631 If start is not None, return only heads reachable from start.
1631 If start is not None, return only heads reachable from start.
1632 If closed is True, return heads that are marked as closed as well.
1632 If closed is True, return heads that are marked as closed as well.
1633 '''
1633 '''
1634 if branch is None:
1634 if branch is None:
1635 branch = self[None].branch()
1635 branch = self[None].branch()
1636 branches = self.branchmap()
1636 branches = self.branchmap()
1637 if branch not in branches:
1637 if branch not in branches:
1638 return []
1638 return []
1639 # the cache returns heads ordered lowest to highest
1639 # the cache returns heads ordered lowest to highest
1640 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1640 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1641 if start is not None:
1641 if start is not None:
1642 # filter out the heads that cannot be reached from startrev
1642 # filter out the heads that cannot be reached from startrev
1643 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1643 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1644 bheads = [h for h in bheads if h in fbheads]
1644 bheads = [h for h in bheads if h in fbheads]
1645 return bheads
1645 return bheads
1646
1646
1647 def branches(self, nodes):
1647 def branches(self, nodes):
1648 if not nodes:
1648 if not nodes:
1649 nodes = [self.changelog.tip()]
1649 nodes = [self.changelog.tip()]
1650 b = []
1650 b = []
1651 for n in nodes:
1651 for n in nodes:
1652 t = n
1652 t = n
1653 while True:
1653 while True:
1654 p = self.changelog.parents(n)
1654 p = self.changelog.parents(n)
1655 if p[1] != nullid or p[0] == nullid:
1655 if p[1] != nullid or p[0] == nullid:
1656 b.append((t, n, p[0], p[1]))
1656 b.append((t, n, p[0], p[1]))
1657 break
1657 break
1658 n = p[0]
1658 n = p[0]
1659 return b
1659 return b
1660
1660
1661 def between(self, pairs):
1661 def between(self, pairs):
1662 r = []
1662 r = []
1663
1663
1664 for top, bottom in pairs:
1664 for top, bottom in pairs:
1665 n, l, i = top, [], 0
1665 n, l, i = top, [], 0
1666 f = 1
1666 f = 1
1667
1667
1668 while n != bottom and n != nullid:
1668 while n != bottom and n != nullid:
1669 p = self.changelog.parents(n)[0]
1669 p = self.changelog.parents(n)[0]
1670 if i == f:
1670 if i == f:
1671 l.append(n)
1671 l.append(n)
1672 f = f * 2
1672 f = f * 2
1673 n = p
1673 n = p
1674 i += 1
1674 i += 1
1675
1675
1676 r.append(l)
1676 r.append(l)
1677
1677
1678 return r
1678 return r
1679
1679
1680 def pull(self, remote, heads=None, force=False):
1680 def pull(self, remote, heads=None, force=False):
1681 return exchange.pull (self, remote, heads, force)
1681 return exchange.pull (self, remote, heads, force)
1682
1682
1683 def checkpush(self, pushop):
1683 def checkpush(self, pushop):
1684 """Extensions can override this function if additional checks have
1684 """Extensions can override this function if additional checks have
1685 to be performed before pushing, or call it if they override push
1685 to be performed before pushing, or call it if they override push
1686 command.
1686 command.
1687 """
1687 """
1688 pass
1688 pass
1689
1689
1690 def push(self, remote, force=False, revs=None, newbranch=False):
1690 def push(self, remote, force=False, revs=None, newbranch=False):
1691 return exchange.push(self, remote, force, revs, newbranch)
1691 return exchange.push(self, remote, force, revs, newbranch)
1692
1692
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        Protocol: the server first sends a numeric status line (0 = ok),
        then a "total_files total_bytes" line, then for each file a
        "name\\0size" header followed by the raw file data.

        Returns len(self.heads()) + 1 (a pull-style "changesets added"
        count for the caller).
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            # all store writes happen inside one transaction so a failed
            # stream clone can be rolled back
            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta in the
                # transfer-rate division below
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                # seed a local branch cache from the remote branchmap
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1795
1795
1796 def clone(self, remote, heads=[], stream=False):
1796 def clone(self, remote, heads=[], stream=False):
1797 '''clone remote repository.
1797 '''clone remote repository.
1798
1798
1799 keyword arguments:
1799 keyword arguments:
1800 heads: list of revs to clone (forces use of pull)
1800 heads: list of revs to clone (forces use of pull)
1801 stream: use streaming clone if possible'''
1801 stream: use streaming clone if possible'''
1802
1802
1803 # now, all clients that can request uncompressed clones can
1803 # now, all clients that can request uncompressed clones can
1804 # read repo formats supported by all servers that can serve
1804 # read repo formats supported by all servers that can serve
1805 # them.
1805 # them.
1806
1806
1807 # if revlog format changes, client will have to check version
1807 # if revlog format changes, client will have to check version
1808 # and format flags on "stream" capability, and use
1808 # and format flags on "stream" capability, and use
1809 # uncompressed only if compatible.
1809 # uncompressed only if compatible.
1810
1810
1811 if not stream:
1811 if not stream:
1812 # if the server explicitly prefers to stream (for fast LANs)
1812 # if the server explicitly prefers to stream (for fast LANs)
1813 stream = remote.capable('stream-preferred')
1813 stream = remote.capable('stream-preferred')
1814
1814
1815 if stream and not heads:
1815 if stream and not heads:
1816 # 'stream' means remote revlog format is revlogv1 only
1816 # 'stream' means remote revlog format is revlogv1 only
1817 if remote.capable('stream'):
1817 if remote.capable('stream'):
1818 return self.stream_in(remote, set(('revlogv1',)))
1818 return self.stream_in(remote, set(('revlogv1',)))
1819 # otherwise, 'streamreqs' contains the remote revlog format
1819 # otherwise, 'streamreqs' contains the remote revlog format
1820 streamreqs = remote.capable('streamreqs')
1820 streamreqs = remote.capable('streamreqs')
1821 if streamreqs:
1821 if streamreqs:
1822 streamreqs = set(streamreqs.split(','))
1822 streamreqs = set(streamreqs.split(','))
1823 # if we support it, stream in and adjust our requirements
1823 # if we support it, stream in and adjust our requirements
1824 if not streamreqs - self.supportedformats:
1824 if not streamreqs - self.supportedformats:
1825 return self.stream_in(remote, streamreqs)
1825 return self.stream_in(remote, streamreqs)
1826 return self.pull(remote, heads)
1826 return self.pull(remote, heads)
1827
1827
1828 def pushkey(self, namespace, key, old, new):
1828 def pushkey(self, namespace, key, old, new):
1829 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1829 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1830 old=old, new=new)
1830 old=old, new=new)
1831 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1831 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1832 ret = pushkey.push(self, namespace, key, old, new)
1832 ret = pushkey.push(self, namespace, key, old, new)
1833 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1833 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1834 ret=ret)
1834 ret=ret)
1835 return ret
1835 return ret
1836
1836
1837 def listkeys(self, namespace):
1837 def listkeys(self, namespace):
1838 self.hook('prelistkeys', throw=True, namespace=namespace)
1838 self.hook('prelistkeys', throw=True, namespace=namespace)
1839 self.ui.debug('listing keys for "%s"\n' % namespace)
1839 self.ui.debug('listing keys for "%s"\n' % namespace)
1840 values = pushkey.list(self, namespace)
1840 values = pushkey.list(self, namespace)
1841 self.hook('listkeys', namespace=namespace, values=values)
1841 self.hook('listkeys', namespace=namespace, values=values)
1842 return values
1842 return values
1843
1843
1844 def debugwireargs(self, one, two, three=None, four=None, five=None):
1844 def debugwireargs(self, one, two, three=None, four=None, five=None):
1845 '''used to test argument passing over the wire'''
1845 '''used to test argument passing over the wire'''
1846 return "%s %s %s %s %s" % (one, two, three, four, five)
1846 return "%s %s %s %s %s" % (one, two, three, four, five)
1847
1847
1848 def savecommitmessage(self, text):
1848 def savecommitmessage(self, text):
1849 fp = self.opener('last-message.txt', 'wb')
1849 fp = self.opener('last-message.txt', 'wb')
1850 try:
1850 try:
1851 fp.write(text)
1851 fp.write(text)
1852 finally:
1852 finally:
1853 fp.close()
1853 fp.close()
1854 return self.pathto(fp.name[len(self.root) + 1:])
1854 return self.pathto(fp.name[len(self.root) + 1:])
1855
1855
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) entry in
    *files*; a rename whose source does not exist yet is skipped."""
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1866
1866
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    # only journal files have undo counterparts
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1871
1871
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, which may be a
    plain path or a file:// URL."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1874
1874
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now