clfilter: add actual repo filtering mechanism...
Pierre-Yves David
r18100:3a6ddacb default
@@ -0,0 +1,94 b''
# repoview.py - Filtered view of a localrepo object
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import copy

# map of filter name to the function computing its filtered revision set
filtertable = {}

def filteredrevs(repo, filtername):
    """returns the set of filtered revisions for this filter name"""
    return filtertable[filtername](repo.unfiltered())

class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We cannot alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes the scope of the filtering harder to control.

    This object behaves very much like the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets the value of
      `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view controls which revisions are filtered.
    (The fact that the changelog is copied is an implementation detail.)

    Unlike attributes, this object intercepts all method calls. This means
    that all methods are run on the `repoview` object with the filtered
    `changelog` property. For this purpose the simple `repoview` class must be
    mixed with the actual class of the repository. This ensures that the
    resulting `repoview` object has the very same methods as the repo object.
    This leads to the property below:

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be any
    subclass of `localrepo`, e.g. `bundlerepo` or `httprepo`.
    """

    def __init__(self, repo, filtername):
        object.__setattr__(self, '_unfilteredrepo', repo)
        object.__setattr__(self, 'filtername', filtername)

    # not a propertycache on purpose; we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changelog

        this changelog must not be used for writing"""
        # some cache may be implemented later
        cl = copy.copy(self._unfilteredrepo.changelog)
        cl.filteredrevs = filteredrevs(self._unfilteredrepo, self.filtername)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name):
        """Return a filtered version of a repository"""
        if name == self.filtername:
            return self
        return self.unfiltered().filtered(name)

    # all attribute accesses are forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)

    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called, as the attribute also exists on the class.
    # We need explicit forwarding to the main repo here.
    @property
    def requirements(self):
        return self._unfilteredrepo.requirements
@@ -1,2683 +1,2691 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
-import peer, changegroup, subrepo, discovery, pushkey, obsolete
+import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version
    """
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

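A quick illustration of the decorator just above (`samplemethod` is an invented name, not part of the changeset): a function wrapped with `unfilteredmethod` always receives the bare repository, so a call made through a filtered view cannot accidentally operate on a filtered changelog.

    @unfilteredmethod
    def samplemethod(repo, *args):
        # `repo` is guaranteed to be repo.unfiltered() here, even when the
        # caller invoked samplemethod on a repoview proxy
        return repo.changelog
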
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by the filtered repo."""
        return self

+    def filtered(self, name):
+        """Return a filtered version of a repository"""
+        # build a new class with the mixin and the current class
+        # (possibly a subclass of the repo)
+        class proxycls(repoview.repoview, self.unfiltered().__class__):
+            pass
+        return proxycls(self, name)
+
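The class statement above runs on every call, building a fresh mixin type whose MRO places `repoview`'s `changelog` property and attribute hooks ahead of the concrete repo class. A rough equivalent using `type()`, assuming an existing localrepo instance `repo` and a hypothetical filter name 'somefilter' already registered in `repoview.filtertable` (neither is part of the changeset):

    # Equivalent dynamic-class construction; 'somefilter' is hypothetical.
    base = repo.unfiltered().__class__
    proxycls = type('proxycls', (repoview.repoview, base), {})
    view = proxycls(repo, 'somefilter')
    assert view.unfiltered() is repo.unfiltered()
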
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @unfilteredpropertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possible unstable above extinct)
        - a new obsolete marker is added (possible new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmethod # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded; we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmethod # Until we get a smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     + '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmethod # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmethod # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

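    # Illustrative usage sketch (not part of the original source): lookup()
    # resolves any changeset identifier to a binary node, while
    # lookupbranch() decides whether a key names a branch or a changeset.
    # Assuming a repository that has a branch called 'stable':
    #
    #     repo.lookup('tip')          # 20-byte node of the tip changeset
    #     repo.lookupbranch('stable') # 'stable' (key is a known branch)
    #     repo.lookupbranch('0')      # branch name of revision 0
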
    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

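    # Illustrative sketch (not part of the original source): known() backs
    # the 'known' wire-protocol query used during discovery. A node is
    # reported known only if it is present and not in the secret phase:
    #
    #     repo.known([goodnode, missingnode])  # -> [True, False]
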
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

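    # Illustrative sketch (not part of the original source): the patterns fed
    # to _loadfilter() come from the [encode] and [decode] hgrc sections.
    # Assuming the built-in 'pipe:' command prefix handled by util.filter, a
    # configuration like this yields one (matcher, fn, params) triple:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
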
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

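    # Illustrative sketch (not part of the original source): an extension can
    # register an in-process data filter instead of shelling out. The filter
    # name 'myfilter:' and the function below are hypothetical:
    #
    #     def lowercase(s, params, **kwargs):
    #         return s.lower()
    #     repo.adddatafilter('myfilter:', lowercase)
    #
    # 'myfilter:' can then be used as a command in [encode]/[decode] rules.
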
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

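    # Illustrative usage sketch (not part of the original source): callers
    # hold the store lock, close() the transaction on success, and always
    # release() it:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             # ... append data to revlogs ...
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
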
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

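    # Illustrative sketch (not part of the original source): work that must
    # only happen once the lock is dropped can be queued via _afterlock();
    # the callback runs immediately when no lock is currently held:
    #
    #     def notify():
    #         repo.ui.status('lock released\n')
    #     repo._afterlock(notify)
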
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

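    # Illustrative sketch (not part of the original source): when both locks
    # are needed, wlock is conventionally acquired before lock to avoid
    # lock-ordering deadlocks:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             # ... modify store and working-copy metadata ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
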
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

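    # Illustrative usage sketch (not part of the original source): commit
    # everything in the working directory, or only files under a directory
    # (the message and username are placeholders):
    #
    #     repo.commit(text='fix a bug', user='someone <some@example.com>')
    #     m = matchmod.match(repo.root, '', ['path:src'])
    #     repo.commit(text='partial commit', match=m)
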
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parents' phase;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0, we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

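    # Illustrative sketch (not part of the original source): commitctx() is
    # also the entry point for synthesizing commits without touching the
    # working directory, e.g. from a context.memctx (signature assumed as of
    # this era; 'getfctx' is a hypothetical name):
    #
    #     def getfctx(repo, memctx, path):
    #         return context.memfilectx(path, 'new content\n')
    #     mctx = context.memctx(repo, (repo['.'].node(), None), 'import',
    #                           ['a.txt'], getfctx, user='someone')
    #     repo.commitctx(mctx)
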
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

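    # Illustrative sketch (not part of the original source): walking every
    # tracked file of a revision with an always-matcher:
    #
    #     m = matchmod.always(repo.root, '')
    #     for f in repo.walk(m, node='tip'):
    #         repo.ui.write(f + '\n')
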
1567 def status(self, node1='.', node2=None, match=None,
1575 def status(self, node1='.', node2=None, match=None,
1568 ignored=False, clean=False, unknown=False,
1576 ignored=False, clean=False, unknown=False,
1569 listsubrepos=False):
1577 listsubrepos=False):
1570 """return status of files between two nodes or node and working
1578 """return status of files between two nodes or node and working
1571 directory.
1579 directory.
1572
1580
1573 If node1 is None, use the first dirstate parent instead.
1581 If node1 is None, use the first dirstate parent instead.
1574 If node2 is None, compare node1 with working directory.
1582 If node2 is None, compare node1 with working directory.
1575 """
1583 """
1576
1584
1577 def mfmatches(ctx):
1585 def mfmatches(ctx):
1578 mf = ctx.manifest().copy()
1586 mf = ctx.manifest().copy()
1579 if match.always():
1587 if match.always():
1580 return mf
1588 return mf
1581 for fn in mf.keys():
1589 for fn in mf.keys():
1582 if not match(fn):
1590 if not match(fn):
1583 del mf[fn]
1591 del mf[fn]
1584 return mf
1592 return mf
1585
1593
1586 if isinstance(node1, context.changectx):
1594 if isinstance(node1, context.changectx):
1587 ctx1 = node1
1595 ctx1 = node1
1588 else:
1596 else:
1589 ctx1 = self[node1]
1597 ctx1 = self[node1]
1590 if isinstance(node2, context.changectx):
1598 if isinstance(node2, context.changectx):
1591 ctx2 = node2
1599 ctx2 = node2
1592 else:
1600 else:
1593 ctx2 = self[node2]
1601 ctx2 = self[node2]
1594
1602
1595 working = ctx2.rev() is None
1603 working = ctx2.rev() is None
1596 parentworking = working and ctx1 == self['.']
1604 parentworking = working and ctx1 == self['.']
1597 match = match or matchmod.always(self.root, self.getcwd())
1605 match = match or matchmod.always(self.root, self.getcwd())
1598 listignored, listclean, listunknown = ignored, clean, unknown
1606 listignored, listclean, listunknown = ignored, clean, unknown
1599
1607
1600 # load earliest manifest first for caching reasons
1608 # load earliest manifest first for caching reasons
1601 if not working and ctx2.rev() < ctx1.rev():
1609 if not working and ctx2.rev() < ctx1.rev():
1602 ctx2.manifest()
1610 ctx2.manifest()
1603
1611
1604 if not parentworking:
1612 if not parentworking:
1605 def bad(f, msg):
1613 def bad(f, msg):
1606 # 'f' may be a directory pattern from 'match.files()',
1614 # 'f' may be a directory pattern from 'match.files()',
1607 # so 'f not in ctx1' is not enough
1615 # so 'f not in ctx1' is not enough
1608 if f not in ctx1 and f not in ctx1.dirs():
1616 if f not in ctx1 and f not in ctx1.dirs():
1609 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1617 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1610 match.bad = bad
1618 match.bad = bad
1611
1619
1612 if working: # we need to scan the working dir
1620 if working: # we need to scan the working dir
1613 subrepos = []
1621 subrepos = []
1614 if '.hgsub' in self.dirstate:
1622 if '.hgsub' in self.dirstate:
1615 subrepos = ctx2.substate.keys()
1623 subrepos = ctx2.substate.keys()
1616 s = self.dirstate.status(match, subrepos, listignored,
1624 s = self.dirstate.status(match, subrepos, listignored,
1617 listclean, listunknown)
1625 listclean, listunknown)
1618 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1626 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1619
1627
1620 # check for any possibly clean files
1628 # check for any possibly clean files
1621 if parentworking and cmp:
1629 if parentworking and cmp:
1622 fixup = []
1630 fixup = []
1623 # do a full compare of any files that might have changed
1631 # do a full compare of any files that might have changed
1624 for f in sorted(cmp):
1632 for f in sorted(cmp):
1625 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1633 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1626 or ctx1[f].cmp(ctx2[f])):
1634 or ctx1[f].cmp(ctx2[f])):
1627 modified.append(f)
1635 modified.append(f)
1628 else:
1636 else:
1629 fixup.append(f)
1637 fixup.append(f)
1630
1638
1631 # update dirstate for files that are actually clean
1639 # update dirstate for files that are actually clean
1632 if fixup:
1640 if fixup:
1633 if listclean:
1641 if listclean:
1634 clean += fixup
1642 clean += fixup
1635
1643
1636 try:
1644 try:
1637 # updating the dirstate is optional
1645 # updating the dirstate is optional
1638 # so we don't wait on the lock
1646 # so we don't wait on the lock
1639 wlock = self.wlock(False)
1647 wlock = self.wlock(False)
1640 try:
1648 try:
1641 for f in fixup:
1649 for f in fixup:
1642 self.dirstate.normal(f)
1650 self.dirstate.normal(f)
1643 finally:
1651 finally:
1644 wlock.release()
1652 wlock.release()
1645 except error.LockError:
1653 except error.LockError:
1646 pass
1654 pass
1647
1655
1648 if not parentworking:
1656 if not parentworking:
1649 mf1 = mfmatches(ctx1)
1657 mf1 = mfmatches(ctx1)
1650 if working:
1658 if working:
1651 # we are comparing working dir against non-parent
1659 # we are comparing working dir against non-parent
1652 # generate a pseudo-manifest for the working dir
1660 # generate a pseudo-manifest for the working dir
1653 mf2 = mfmatches(self['.'])
1661 mf2 = mfmatches(self['.'])
1654 for f in cmp + modified + added:
1662 for f in cmp + modified + added:
1655 mf2[f] = None
1663 mf2[f] = None
1656 mf2.set(f, ctx2.flags(f))
1664 mf2.set(f, ctx2.flags(f))
1657 for f in removed:
1665 for f in removed:
1658 if f in mf2:
1666 if f in mf2:
1659 del mf2[f]
1667 del mf2[f]
1660 else:
1668 else:
1661 # we are comparing two revisions
1669 # we are comparing two revisions
1662 deleted, unknown, ignored = [], [], []
1670 deleted, unknown, ignored = [], [], []
1663 mf2 = mfmatches(ctx2)
1671 mf2 = mfmatches(ctx2)
1664
1672
1665 modified, added, clean = [], [], []
1673 modified, added, clean = [], [], []
1666 withflags = mf1.withflags() | mf2.withflags()
1674 withflags = mf1.withflags() | mf2.withflags()
1667 for fn in mf2:
1675 for fn in mf2:
1668 if fn in mf1:
1676 if fn in mf1:
1669 if (fn not in deleted and
1677 if (fn not in deleted and
1670 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1678 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1671 (mf1[fn] != mf2[fn] and
1679 (mf1[fn] != mf2[fn] and
1672 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1680 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1673 modified.append(fn)
1681 modified.append(fn)
1674 elif listclean:
1682 elif listclean:
1675 clean.append(fn)
1683 clean.append(fn)
1676 del mf1[fn]
1684 del mf1[fn]
1677 elif fn not in deleted:
1685 elif fn not in deleted:
1678 added.append(fn)
1686 added.append(fn)
1679 removed = mf1.keys()
1687 removed = mf1.keys()
1680
1688
1681 if working and modified and not self.dirstate._checklink:
1689 if working and modified and not self.dirstate._checklink:
1682 # Symlink placeholders may get non-symlink-like contents
1690 # Symlink placeholders may get non-symlink-like contents
1683 # via user error or dereferencing by NFS or Samba servers,
1691 # via user error or dereferencing by NFS or Samba servers,
1684 # so we filter out any placeholders that don't look like a
1692 # so we filter out any placeholders that don't look like a
1685 # symlink
1693 # symlink
1686 sane = []
1694 sane = []
1687 for f in modified:
1695 for f in modified:
1688 if ctx2.flags(f) == 'l':
1696 if ctx2.flags(f) == 'l':
1689 d = ctx2[f].data()
1697 d = ctx2[f].data()
1690 if len(d) >= 1024 or '\n' in d or util.binary(d):
1698 if len(d) >= 1024 or '\n' in d or util.binary(d):
1691 self.ui.debug('ignoring suspect symlink placeholder'
1699 self.ui.debug('ignoring suspect symlink placeholder'
1692 ' "%s"\n' % f)
1700 ' "%s"\n' % f)
1693 continue
1701 continue
1694 sane.append(f)
1702 sane.append(f)
1695 modified = sane
1703 modified = sane
1696
1704
        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

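    # Illustrative sketch (not part of the original file): status() returns
    # its seven lists in a fixed order, so callers usually unpack the tuple
    # directly. Assuming `repo` is a localrepository with a working copy:
    #
    #     >>> st = repo.status(ignored=True, clean=True, unknown=True)
    #     >>> modified, added, removed, deleted, unknown, ignored, clean = st
    #     >>> # each element is a sorted list of file names
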
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

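    # Illustrative sketch (not part of the original file): heads() sorts by
    # revision number, newest first, so the first element is the most
    # recently added head:
    #
    #     >>> hs = repo.heads()
    #     >>> revs = [repo.changelog.rev(n) for n in hs]
    #     >>> revs == sorted(revs, reverse=True)
    #     True
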
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

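    # Illustrative sketch (not part of the original file): with closed=True
    # the result is a superset of the default open-heads-only answer,
    # assuming the repository has a 'default' branch:
    #
    #     >>> openheads = repo.branchheads('default')
    #     >>> allheads = repo.branchheads('default', closed=True)
    #     >>> set(openheads).issubset(set(allheads))
    #     True
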
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

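    # Illustrative sketch (not part of the original file): for each node,
    # branches() walks first parents down to the nearest merge or root and
    # reports a 4-tuple (tip, stop node, parent1, parent2). On a purely
    # linear history the walk stops at the root, so:
    #
    #     >>> tip = repo.changelog.tip()
    #     >>> t, n, p0, p1 = repo.branches([tip])[0]
    #     >>> t == tip and p1 == nullid
    #     True
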
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

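    # Illustrative sketch (not part of the original file): the loop above
    # samples the first-parent chain at offsets 1, 2, 4, 8, ... below `top`,
    # which lets the legacy discovery protocol bisect a range in O(log n)
    # queries. Assuming `a` is exactly ten first-parent steps above its
    # ancestor `b`:
    #
    #     >>> l = repo.between([(a, b)])[0]
    #     >>> len(l)   # the nodes 1, 2, 4 and 8 steps below a
    #     4
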
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

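    # Illustrative sketch (not part of the original file): a programmatic
    # pull, assuming `other` is a peer object for the remote (hypothetical
    # URL; mercurial.hg.peer is one way to obtain such an object):
    #
    #     >>> repo.pull(other)             # everything, like 'hg pull'
    #     >>> repo.pull(other, heads=[h])  # only h and its ancestors
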
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

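    # Illustrative sketch (not part of the original file): a hypothetical
    # extension could subclass the repository in its reposetup() to layer an
    # extra pre-push check on top of this hook:
    #
    #     def reposetup(ui, repo):
    #         class frozenrepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 super(frozenrepo, self).checkpush(force, revs)
    #                 if not force and ui.configbool('freeze', 'frozen'):
    #                     raise util.Abort('pushing is currently frozen')
    #         repo.__class__ = frozenrepo
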
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the iteration
                        if unfi.obsstore:
                            # these messages are pre-built here to stay
                            # within the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed, synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

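    # Illustrative sketch (not part of the original file): decoding push()'s
    # return value per the docstring above, assuming `other` is a peer:
    #
    #     >>> ret = repo.push(other)
    #     >>> if ret is None:
    #     ...     msg = 'nothing to push'
    #     ... elif ret == 0:
    #     ...     msg = 'push failed (HTTP error)'
    #     ... else:
    #     ...     msg = 'pushed (head-count codes as in addchangegroup())'
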
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

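    # Illustrative sketch (not part of the original file): bundling everything
    # between a hypothetical base node and the current heads; the returned
    # chunkbuffer is consumed incrementally:
    #
    #     >>> cg = repo.changegroupsubset([base], repo.heads(), 'bundle')
    #     >>> chunk = cg.read(4096)   # successive changegroup data
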
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

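    # Illustrative sketch (not part of the original file): building the same
    # changegroup from a precomputed discovery.outgoing instance, assuming
    # `commonnodes` and `headnodes` are known changelog nodes:
    #
    #     >>> out = discovery.outgoing(repo.changelog, commonnodes, headnodes)
    #     >>> cg = repo.getlocalbundle('push', out)  # None if nothing missing
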
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

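    # Illustrative sketch (not part of the original file): with no extra
    # arguments getbundle() bundles the whole repository (common defaults to
    # [nullid], heads to the local heads); passing both narrows the bundle:
    #
    #     >>> cg = repo.getbundle('bundle')
    #     >>> partial = repo.getbundle('pull', heads=[h], common=[c])
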
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

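    # Illustrative sketch (not part of the original file): mapping the return
    # value back to a head-count delta per the docstring above, assuming `cg`
    # is a changegroup and `url` its source:
    #
    #     >>> ret = repo.addchangegroup(cg, 'pull', url)
    #     >>> if ret == 0:
    #     ...     delta = None        # nothing changed
    #     ... elif ret > 0:
    #     ...     delta = ret - 1     # heads gained (1 means unchanged)
    #     ... else:
    #     ...     delta = ret + 1     # heads lost
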
2518 def stream_in(self, remote, requirements):
2526 def stream_in(self, remote, requirements):
2519 lock = self.lock()
2527 lock = self.lock()
2520 try:
2528 try:
2521 # Save remote branchmap. We will use it later
2529 # Save remote branchmap. We will use it later
2522 # to speed up branchcache creation
2530 # to speed up branchcache creation
2523 rbranchmap = None
2531 rbranchmap = None
2524 if remote.capable("branchmap"):
2532 if remote.capable("branchmap"):
2525 rbranchmap = remote.branchmap()
2533 rbranchmap = remote.branchmap()
2526
2534
2527 fp = remote.stream_out()
2535 fp = remote.stream_out()
2528 l = fp.readline()
2536 l = fp.readline()
2529 try:
2537 try:
2530 resp = int(l)
2538 resp = int(l)
2531 except ValueError:
2539 except ValueError:
2532 raise error.ResponseError(
2540 raise error.ResponseError(
2533 _('unexpected response from remote server:'), l)
2541 _('unexpected response from remote server:'), l)
2534 if resp == 1:
2542 if resp == 1:
2535 raise util.Abort(_('operation forbidden by server'))
2543 raise util.Abort(_('operation forbidden by server'))
2536 elif resp == 2:
2544 elif resp == 2:
2537 raise util.Abort(_('locking the remote repository failed'))
2545 raise util.Abort(_('locking the remote repository failed'))
2538 elif resp != 0:
2546 elif resp != 0:
2539 raise util.Abort(_('the server sent an unknown error code'))
2547 raise util.Abort(_('the server sent an unknown error code'))
2540 self.ui.status(_('streaming all changes\n'))
2548 self.ui.status(_('streaming all changes\n'))
2541 l = fp.readline()
2549 l = fp.readline()
2542 try:
2550 try:
2543 total_files, total_bytes = map(int, l.split(' ', 1))
2551 total_files, total_bytes = map(int, l.split(' ', 1))
2544 except (ValueError, TypeError):
2552 except (ValueError, TypeError):
2545 raise error.ResponseError(
2553 raise error.ResponseError(
2546 _('unexpected response from remote server:'), l)
2554 _('unexpected response from remote server:'), l)
2547 self.ui.status(_('%d files to transfer, %s of data\n') %
2555 self.ui.status(_('%d files to transfer, %s of data\n') %
2548 (total_files, util.bytecount(total_bytes)))
2556 (total_files, util.bytecount(total_bytes)))
2549 handled_bytes = 0
2557 handled_bytes = 0
2550 self.ui.progress(_('clone'), 0, total=total_bytes)
2558 self.ui.progress(_('clone'), 0, total=total_bytes)
2551 start = time.time()
2559 start = time.time()
2552 for i in xrange(total_files):
2560 for i in xrange(total_files):
2553 # XXX doesn't support '\n' or '\r' in filenames
2561 # XXX doesn't support '\n' or '\r' in filenames
2554 l = fp.readline()
2562 l = fp.readline()
2555 try:
2563 try:
2556 name, size = l.split('\0', 1)
2564 name, size = l.split('\0', 1)
2557 size = int(size)
2565 size = int(size)
2558 except (ValueError, TypeError):
2566 except (ValueError, TypeError):
2559 raise error.ResponseError(
2567 raise error.ResponseError(
2560 _('unexpected response from remote server:'), l)
2568 _('unexpected response from remote server:'), l)
2561 if self.ui.debugflag:
2569 if self.ui.debugflag:
2562 self.ui.debug('adding %s (%s)\n' %
2570 self.ui.debug('adding %s (%s)\n' %
2563 (name, util.bytecount(size)))
2571 (name, util.bytecount(size)))
2564 # for backwards compat, name was partially encoded
2572 # for backwards compat, name was partially encoded
2565 ofp = self.sopener(store.decodedir(name), 'w')
2573 ofp = self.sopener(store.decodedir(name), 'w')
2566 for chunk in util.filechunkiter(fp, limit=size):
2574 for chunk in util.filechunkiter(fp, limit=size):
2567 handled_bytes += len(chunk)
2575 handled_bytes += len(chunk)
2568 self.ui.progress(_('clone'), handled_bytes,
2576 self.ui.progress(_('clone'), handled_bytes,
2569 total=total_bytes)
2577 total=total_bytes)
2570 ofp.write(chunk)
2578 ofp.write(chunk)
2571 ofp.close()
2579 ofp.close()
2572 elapsed = time.time() - start
2580 elapsed = time.time() - start
2573 if elapsed <= 0:
2581 if elapsed <= 0:
2574 elapsed = 0.001
2582 elapsed = 0.001
2575 self.ui.progress(_('clone'), None)
2583 self.ui.progress(_('clone'), None)
2576 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2584 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2577 (util.bytecount(total_bytes), elapsed,
2585 (util.bytecount(total_bytes), elapsed,
2578 util.bytecount(total_bytes / elapsed)))
2586 util.bytecount(total_bytes / elapsed)))
2579
2587
2580 # new requirements = old non-format requirements +
2588 # new requirements = old non-format requirements +
2581 # new format-related
2589 # new format-related
2582 # requirements from the streamed-in repository
2590 # requirements from the streamed-in repository
2583 requirements.update(set(self.requirements) - self.supportedformats)
2591 requirements.update(set(self.requirements) - self.supportedformats)
2584 self._applyrequirements(requirements)
2592 self._applyrequirements(requirements)
2585 self._writerequirements()
2593 self._writerequirements()
2586
2594
2587 if rbranchmap:
2595 if rbranchmap:
2588 rbheads = []
2596 rbheads = []
2589 for bheads in rbranchmap.itervalues():
2597 for bheads in rbranchmap.itervalues():
2590 rbheads.extend(bheads)
2598 rbheads.extend(bheads)
2591
2599
2592 self.branchcache = rbranchmap
2600 self.branchcache = rbranchmap
2593 if rbheads:
2601 if rbheads:
2594 rtiprev = max((int(self.changelog.rev(node))
2602 rtiprev = max((int(self.changelog.rev(node))
2595 for node in rbheads))
2603 for node in rbheads))
2596 self._writebranchcache(self.branchcache,
2604 self._writebranchcache(self.branchcache,
2597 self[rtiprev].node(), rtiprev)
2605 self[rtiprev].node(), rtiprev)
2598 self.invalidate()
2606 self.invalidate()
2599 return len(self.heads()) + 1
2607 return len(self.heads()) + 1
2600 finally:
2608 finally:
2601 lock.release()
2609 lock.release()
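The payload read above has a simple line-oriented framing: a numeric status line (0 on success), a '<total_files> <total_bytes>' line, then for each file a '<name>\0<size>' header followed by exactly size raw bytes of store data. A minimal standalone Python 3 parser, written here only to illustrate that framing; the function name and sample payload are invented for this sketch, and the real code above streams chunks through util.filechunkiter instead of reading whole files into memory:

import io

def parse_stream(fp):
    # illustrative parser for the framing consumed by stream_in above
    resp = int(fp.readline())                # status: 0 = OK, 1/2 = errors
    if resp != 0:
        raise ValueError('server refused stream: code %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _ in range(total_files):
        name, size = fp.readline().split('\0', 1)  # '<name>\0<size>\n'
        yield name, fp.read(int(size))             # raw store bytes

payload = io.StringIO('0\n2 11\ndata/a.i\x005\nAAAAAdata/b.i\x006\nBBBBBB')
for name, data in parse_stream(payload):
    print('%s: %d bytes' % (name, len(data)))      # data/a.i: 5, data/b.i: 6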

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
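The streamreqs check is plain set arithmetic: streaming is only safe when every format requirement the server advertises is one the client supports. A worked example; both the capability string and the client's set are invented for illustration:

streamreqs = set('revlogv1,generaldelta'.split(','))  # hypothetical server value
supportedformats = set(['revlogv1', 'generaldelta'])  # hypothetical client set
assert not streamreqs - supportedformats  # empty difference: stream_in is safe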

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
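Both methods front the generic pushkey dispatch: a namespace ('bookmarks' and 'phases' are common built-ins) maps string keys to string values, with hooks fired around each operation. A hedged usage sketch, assuming an already-opened repo object and a bookmark name invented for the example; the empty-string convention for a missing old value follows the bookmarks namespace as I understand it:

marks = repo.listkeys('bookmarks')    # {bookmark name: hex changeset id}
target = repo['tip'].hex()            # new position for the bookmark
# old value '' conventionally means "bookmark does not exist yet"
repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), target)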

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
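This is what lets a message survive a failed commit: the draft is parked in .hg/last-message.txt and the caller gets back a path relative to the current directory, suitable for an error hint. A minimal sketch, assuming an already-opened repo object and an invented message:

relpath = repo.savecommitmessage('WIP: draft commit message\n')
repo.ui.status('commit message saved in %s\n' % relpath)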

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
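These two helpers are the bookkeeping around transaction journals: while a transaction is live its state sits in journal* files, and on successful close the closure built by aftertrans renames each one to its undo* counterpart so a later rollback can find it; undoname computes the same mapping for a single path. A small sketch; the file pairs are illustrative and the asserted path assumes POSIX separators:

renamer = aftertrans([('journal', 'undo'),
                      ('journal.dirstate', 'undo.dirstate')])
renamer()   # runs util.rename() per pair, silently skipping missing files
assert undoname('.hg/journal.branch') == '.hg/undo.branch'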

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
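These module-level hooks are the repo-type protocol: mercurial.hg.repository() selects a module based on the path's scheme and calls its instance() and islocal(). A hedged sketch of direct use, with an invented path and the Python 2 ui API of this era:

from mercurial import ui as uimod

myui = uimod.ui()
repo = instance(myui, '/path/to/repo', False)  # open existing, do not create
assert islocal('/path/to/repo')  # this module only ever handles local paths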