localpeer: return only visible heads and branchmap...
Pierre-Yves David
r17204:4feb55e6 default
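The patch below touches two files. In mercurial/localrepo.py, localpeer.heads() and localpeer.branchmap() stop returning the repository's raw heads and branchmap (which include secret changesets) and instead go through phases.visibleheads and phases.visiblebranchmap, matching what a wire peer exposes. In mercurial/discovery.py, checkheads can then drop its remote.local() special case. A minimal sketch of the idea with toy stand-ins (none of this is Mercurial's actual API, and it is simplified: the real helpers recompute the heads of the non-secret subset rather than merely dropping secret heads):

SECRET = 2  # stand-in for phases.secret

class ToyRepo(object):
    """Toy repository: a list of head nodes plus a node -> phase map."""
    def __init__(self, heads, phase_of):
        self.allheads = heads      # raw heads, secret ones included
        self.phase_of = phase_of   # nodes absent from the map are public (0)

    def heads(self):
        return list(self.allheads)

def toy_visibleheads(repo):
    # hypothetical analogue of phases.visibleheads: keep non-secret heads
    return [h for h in repo.heads() if repo.phase_of.get(h, 0) < SECRET]

class ToyLocalPeer(object):
    def __init__(self, repo):
        self._repo = repo

    def heads(self):
        # after 4feb55e6: filter through phase visibility instead of
        # returning self._repo.heads() verbatim
        return toy_visibleheads(self._repo)

repo = ToyRepo(heads=['aaa', 'sss'], phase_of={'sss': SECRET})
assert ToyLocalPeer(repo).heads() == ['aaa']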
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -1,266 +1,263 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import nullid, short
 from i18n import _
 import util, setdiscovery, treediscovery, phases
 
 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
 
     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
 
     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".
 
     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """
 
     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
         allknown = True
         nm = repo.changelog.nodemap
         for h in heads:
             if nm.get(h) is None:
                 allknown = False
                 break
         if allknown:
             return (heads, False, heads)
 
     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
 
     Members:
 
     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.
 
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(self, revlog, commonheads, missingheads):
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = revlog
         self._common = None
         self._missing = None
         self.excluded = []
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets
 
     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common
 
     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing
 
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.
 
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo.changelog, None, None)
 
     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force)
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
     if not repo._phasecache.phaseroots[phases.secret]:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = phases.visibleheads(repo)
         og.excluded = [ctx.node() for ctx in repo.set('secret()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             if repo[node].phase() >= phases.secret:
                 excluded.append(node)
             else:
                 missing.append(node)
         if excluded:
             # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         else:
             missingheads = onlyheads
         og.missingheads = missingheads
 
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
 
     return og
 
 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
     """Check that a push won't add any outgoing head
 
     raise Abort error and display ui message as needed.
     """
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return
 
     cl = repo.changelog
     if remote.capable('branchmap'):
         # Check for each named branch if we're creating new remote heads.
         # To be a remote head after push, node must be either:
         # - unknown locally
         # - a local outgoing head descended from update
         # - a remote head that's known locally and not
         #   ancestral to an outgoing head
 
         # 1. Create set of branches involved in the push.
         branches = set(repo[n].branch() for n in outgoing.missing)
 
         # 2. Check for new branches on the remote.
-        if remote.local():
-            remotemap = phases.visiblebranchmap(remote.local())
-        else:
-            remotemap = remote.branchmap()
+        remotemap = remote.branchmap()
         newbranches = branches - set(remotemap)
         if newbranches and not newbranch: # new branch requires --new-branch
             branchnames = ', '.join(sorted(newbranches))
             raise util.Abort(_("push creates new remote branches: %s!")
                              % branchnames,
                              hint=_("use 'hg push --new-branch' to create"
                                     " new remote branches"))
         branches.difference_update(newbranches)
 
         # 3. Construct the initial oldmap and newmap dicts.
         # They contain information about the remote heads before and
         # after the push, respectively.
         # Heads not found locally are not included in either dict,
         # since they won't be affected by the push.
         # unsynced contains all branches with incoming changesets.
         oldmap = {}
         newmap = {}
         unsynced = set()
         for branch in branches:
             remotebrheads = remotemap[branch]
             prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
             oldmap[branch] = prunedbrheads
             newmap[branch] = list(prunedbrheads)
             if len(remotebrheads) > len(prunedbrheads):
                 unsynced.add(branch)
 
         # 4. Update newmap with outgoing changes.
         # This will possibly add new heads and remove existing ones.
         ctxgen = (repo[n] for n in outgoing.missing)
         repo._updatebranchcache(newmap, ctxgen)
 
     else:
         # 1-4b. old servers: Check for new topological heads.
         # Construct {old,new}map with branch = None (topological branch).
         # (code based on _updatebranchcache)
         oldheads = set(h for h in remoteheads if h in cl.nodemap)
         # all nodes in outgoing.missing are children of either:
         # - an element of oldheads
         # - another element of outgoing.missing
         # - nullrev
         # This explains why the new heads are very simple to compute.
         r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
         branches = set([None])
         newmap = {None: list(c.node() for c in r)}
         oldmap = {None: oldheads}
         unsynced = inc and branches or set()
 
     # 5. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     error = None
     localbookmarks = repo._bookmarks
 
     for branch in branches:
         newhs = set(newmap[branch])
         oldhs = set(oldmap[branch])
         dhs = None
         if len(newhs) > len(oldhs):
             # strip updates to existing remote heads from the new heads list
             remotebookmarks = remote.listkeys('bookmarks')
             bookmarkedheads = set()
             for bm in localbookmarks:
                 rnode = remotebookmarks.get(bm)
                 if rnode and rnode in repo:
                     lctx, rctx = repo[bm], repo[rnode]
                     if rctx == lctx.ancestor(rctx):
                         bookmarkedheads.add(lctx.node())
             dhs = list(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
                 if branch not in ('default', None):
                     error = _("push creates new remote head %s "
                               "on branch '%s'!") % (short(dhs[0]), branch)
                 else:
                     error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                 if branch in unsynced:
                     hint = _("you should pull and merge or "
                              "use push -f to force")
                 else:
                     hint = _("did you forget to merge? "
                              "use push -f to force")
             if branch is not None:
                 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
             for h in dhs:
                 repo.ui.note(_("new remote head %s\n") % short(h))
     if error:
         raise util.Abort(error, hint=hint)
 
     # 6. Check for unsynced changes on involved branches.
     if unsynced:
         repo.ui.warn(_("note: unsynced remote changes!\n"))
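The deletion in the hunk above is the payoff of the localrepo.py change that follows: once a local peer filters its own branchmap, checkheads can call remote.branchmap() uniformly for local and wire peers. For completeness, a toy model of a visibility-filtered branchmap (again not phases.visiblebranchmap itself, and simplified in the same way as the earlier sketch, which recomputes nothing):

SECRET = 2  # stand-in for phases.secret

def toy_visiblebranchmap(branchmap, phase_of):
    """branchmap: {branch name: [head nodes]}; phase_of: node -> phase."""
    visible = {}
    for branch, heads in branchmap.items():
        kept = [h for h in heads if phase_of.get(h, 0) < SECRET]
        if kept:  # a branch whose heads were all secret disappears entirely
            visible[branch] = kept
    return visible

print(toy_visiblebranchmap({'default': ['aaa'], 'feature': ['sss']},
                           {'sss': SECRET}))
# -> {'default': ['aaa']}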
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2568 +1,2568 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats
 
     def close(self):
         self._repo.close()
 
     def _capabilities(self):
         return self._caps
 
     def local(self):
         return self._repo
 
     def canpush(self):
         return True
 
     def url(self):
         return self._repo.url()
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def branchmap(self):
-        return self._repo.branchmap()
+        return phases.visiblebranchmap(self._repo)
 
     def heads(self):
-        return self._repo.heads()
+        return phases.visibleheads(self._repo)
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def getbundle(self, source, heads=None, common=None):
         return self._repo.getbundle(source, heads=heads, common=common)
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
     def lock(self):
         return self._repo.lock()
 
     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)
 
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)
 
     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)
 
 class localrepository(object):
 
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']
 
     def _baserequirements(self, create):
         return self.requirements[:]
 
     def __init__(self, baseui, path=None, create=False):
         self.wopener = scmutil.opener(path, expand=True)
         self.wvfs = self.wopener
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.vfs = self.opener
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup.
133 self._phasedefaults = []
133 self._phasedefaults = []
134
134
135 try:
135 try:
136 self.ui.readconfig(self.join("hgrc"), self.root)
136 self.ui.readconfig(self.join("hgrc"), self.root)
137 extensions.loadall(self.ui)
137 extensions.loadall(self.ui)
138 except IOError:
138 except IOError:
139 pass
139 pass
140
140
141 if not self.vfs.isdir():
141 if not self.vfs.isdir():
142 if create:
142 if create:
143 if not self.wvfs.exists():
143 if not self.wvfs.exists():
144 self.wvfs.makedirs()
144 self.wvfs.makedirs()
145 self.vfs.makedir(notindexed=True)
145 self.vfs.makedir(notindexed=True)
146 requirements = self._baserequirements(create)
146 requirements = self._baserequirements(create)
147 if self.ui.configbool('format', 'usestore', True):
147 if self.ui.configbool('format', 'usestore', True):
148 self.vfs.mkdir("store")
148 self.vfs.mkdir("store")
149 requirements.append("store")
149 requirements.append("store")
150 if self.ui.configbool('format', 'usefncache', True):
150 if self.ui.configbool('format', 'usefncache', True):
151 requirements.append("fncache")
151 requirements.append("fncache")
152 if self.ui.configbool('format', 'dotencode', True):
152 if self.ui.configbool('format', 'dotencode', True):
153 requirements.append('dotencode')
153 requirements.append('dotencode')
154 # create an invalid changelog
154 # create an invalid changelog
155 self.vfs.append(
155 self.vfs.append(
156 "00changelog.i",
156 "00changelog.i",
157 '\0\0\0\2' # represents revlogv2
157 '\0\0\0\2' # represents revlogv2
158 ' dummy changelog to prevent using the old repo layout'
158 ' dummy changelog to prevent using the old repo layout'
159 )
159 )
160 if self.ui.configbool('format', 'generaldelta', False):
160 if self.ui.configbool('format', 'generaldelta', False):
161 requirements.append("generaldelta")
161 requirements.append("generaldelta")
162 requirements = set(requirements)
162 requirements = set(requirements)
163 else:
163 else:
164 raise error.RepoError(_("repository %s not found") % path)
164 raise error.RepoError(_("repository %s not found") % path)
165 elif create:
165 elif create:
166 raise error.RepoError(_("repository %s already exists") % path)
166 raise error.RepoError(_("repository %s already exists") % path)
167 else:
167 else:
168 try:
168 try:
169 requirements = scmutil.readrequires(self.vfs, self.supported)
169 requirements = scmutil.readrequires(self.vfs, self.supported)
170 except IOError, inst:
170 except IOError, inst:
171 if inst.errno != errno.ENOENT:
171 if inst.errno != errno.ENOENT:
172 raise
172 raise
173 requirements = set()
173 requirements = set()
174
174
175 self.sharedpath = self.path
175 self.sharedpath = self.path
176 try:
176 try:
177 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
178 if not os.path.exists(s):
178 if not os.path.exists(s):
179 raise error.RepoError(
179 raise error.RepoError(
180 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 _('.hg/sharedpath points to nonexistent directory %s') % s)
181 self.sharedpath = s
181 self.sharedpath = s
182 except IOError, inst:
182 except IOError, inst:
183 if inst.errno != errno.ENOENT:
183 if inst.errno != errno.ENOENT:
184 raise
184 raise
185
185
186 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
187 self.spath = self.store.path
187 self.spath = self.store.path
188 self.sopener = self.store.opener
188 self.sopener = self.store.opener
189 self.svfs = self.sopener
189 self.svfs = self.sopener
190 self.sjoin = self.store.join
190 self.sjoin = self.store.join
191 self.opener.createmode = self.store.createmode
191 self.opener.createmode = self.store.createmode
192 self._applyrequirements(requirements)
192 self._applyrequirements(requirements)
193 if create:
193 if create:
194 self._writerequirements()
194 self._writerequirements()
195
195
196
196
197 self._branchcache = None
197 self._branchcache = None
198 self._branchcachetip = None
198 self._branchcachetip = None
199 self.filterpats = {}
199 self.filterpats = {}
200 self._datafilters = {}
200 self._datafilters = {}
201 self._transref = self._lockref = self._wlockref = None
201 self._transref = self._lockref = self._wlockref = None
202
202
203 # A cache for various files under .hg/ that tracks file changes,
203 # A cache for various files under .hg/ that tracks file changes,
204 # (used by the filecache decorator)
204 # (used by the filecache decorator)
205 #
205 #
206 # Maps a property name to its util.filecacheentry
206 # Maps a property name to its util.filecacheentry
207 self._filecache = {}
207 self._filecache = {}
208
208
209 def close(self):
209 def close(self):
210 pass
210 pass
211
211
212 def _restrictcapabilities(self, caps):
212 def _restrictcapabilities(self, caps):
213 return caps
213 return caps
214
214
215 def _applyrequirements(self, requirements):
215 def _applyrequirements(self, requirements):
216 self.requirements = requirements
216 self.requirements = requirements
217 self.sopener.options = dict((r, 1) for r in requirements
217 self.sopener.options = dict((r, 1) for r in requirements
218 if r in self.openerreqs)
218 if r in self.openerreqs)
219
219
220 def _writerequirements(self):
220 def _writerequirements(self):
221 reqfile = self.opener("requires", "w")
221 reqfile = self.opener("requires", "w")
222 for r in self.requirements:
222 for r in self.requirements:
223 reqfile.write("%s\n" % r)
223 reqfile.write("%s\n" % r)
224 reqfile.close()
224 reqfile.close()
225
225
226 def _checknested(self, path):
226 def _checknested(self, path):
227 """Determine if path is a legal nested repository."""
227 """Determine if path is a legal nested repository."""
228 if not path.startswith(self.root):
228 if not path.startswith(self.root):
229 return False
229 return False
230 subpath = path[len(self.root) + 1:]
230 subpath = path[len(self.root) + 1:]
231 normsubpath = util.pconvert(subpath)
231 normsubpath = util.pconvert(subpath)
232
232
233 # XXX: Checking against the current working copy is wrong in
233 # XXX: Checking against the current working copy is wrong in
234 # the sense that it can reject things like
234 # the sense that it can reject things like
235 #
235 #
236 # $ hg cat -r 10 sub/x.txt
236 # $ hg cat -r 10 sub/x.txt
237 #
237 #
238 # if sub/ is no longer a subrepository in the working copy
238 # if sub/ is no longer a subrepository in the working copy
239 # parent revision.
239 # parent revision.
240 #
240 #
241 # However, it can of course also allow things that would have
241 # However, it can of course also allow things that would have
242 # been rejected before, such as the above cat command if sub/
242 # been rejected before, such as the above cat command if sub/
243 # is a subrepository now, but was a normal directory before.
243 # is a subrepository now, but was a normal directory before.
244 # The old path auditor would have rejected by mistake since it
244 # The old path auditor would have rejected by mistake since it
245 # panics when it sees sub/.hg/.
245 # panics when it sees sub/.hg/.
246 #
246 #
247 # All in all, checking against the working copy seems sensible
247 # All in all, checking against the working copy seems sensible
248 # since we want to prevent access to nested repositories on
248 # since we want to prevent access to nested repositories on
249 # the filesystem *now*.
249 # the filesystem *now*.
250 ctx = self[None]
250 ctx = self[None]
251 parts = util.splitpath(subpath)
251 parts = util.splitpath(subpath)
252 while parts:
252 while parts:
253 prefix = '/'.join(parts)
253 prefix = '/'.join(parts)
254 if prefix in ctx.substate:
254 if prefix in ctx.substate:
255 if prefix == normsubpath:
255 if prefix == normsubpath:
256 return True
256 return True
257 else:
257 else:
258 sub = ctx.sub(prefix)
258 sub = ctx.sub(prefix)
259 return sub.checknested(subpath[len(prefix) + 1:])
259 return sub.checknested(subpath[len(prefix) + 1:])
260 else:
260 else:
261 parts.pop()
261 parts.pop()
262 return False
262 return False
263
263
264 def peer(self):
264 def peer(self):
265 return localpeer(self) # not cached to avoid reference cycle
265 return localpeer(self) # not cached to avoid reference cycle
266
266
267 @filecache('bookmarks')
267 @filecache('bookmarks')
268 def _bookmarks(self):
268 def _bookmarks(self):
269 return bookmarks.read(self)
269 return bookmarks.read(self)
270
270
271 @filecache('bookmarks.current')
271 @filecache('bookmarks.current')
272 def _bookmarkcurrent(self):
272 def _bookmarkcurrent(self):
273 return bookmarks.readcurrent(self)
273 return bookmarks.readcurrent(self)
274
274
275 def _writebookmarks(self, marks):
275 def _writebookmarks(self, marks):
276 bookmarks.write(self)
276 bookmarks.write(self)
277
277
278 def bookmarkheads(self, bookmark):
278 def bookmarkheads(self, bookmark):
279 name = bookmark.split('@', 1)[0]
279 name = bookmark.split('@', 1)[0]
280 heads = []
280 heads = []
281 for mark, n in self._bookmarks.iteritems():
281 for mark, n in self._bookmarks.iteritems():
282 if mark.split('@', 1)[0] == name:
282 if mark.split('@', 1)[0] == name:
283 heads.append(n)
283 heads.append(n)
284 return heads
284 return heads
285
285
286 @storecache('phaseroots')
286 @storecache('phaseroots')
287 def _phasecache(self):
287 def _phasecache(self):
288 return phases.phasecache(self, self._phasedefaults)
288 return phases.phasecache(self, self._phasedefaults)
289
289
290 @storecache('obsstore')
290 @storecache('obsstore')
291 def obsstore(self):
291 def obsstore(self):
292 store = obsolete.obsstore(self.sopener)
292 store = obsolete.obsstore(self.sopener)
293 return store
293 return store
294
294
295 @storecache('00changelog.i')
295 @storecache('00changelog.i')
296 def changelog(self):
296 def changelog(self):
297 c = changelog.changelog(self.sopener)
297 c = changelog.changelog(self.sopener)
298 if 'HG_PENDING' in os.environ:
298 if 'HG_PENDING' in os.environ:
299 p = os.environ['HG_PENDING']
299 p = os.environ['HG_PENDING']
300 if p.startswith(self.root):
300 if p.startswith(self.root):
301 c.readpending('00changelog.i.a')
301 c.readpending('00changelog.i.a')
302 return c
302 return c
303
303
304 @storecache('00manifest.i')
304 @storecache('00manifest.i')
305 def manifest(self):
305 def manifest(self):
306 return manifest.manifest(self.sopener)
306 return manifest.manifest(self.sopener)
307
307
308 @filecache('dirstate')
308 @filecache('dirstate')
309 def dirstate(self):
309 def dirstate(self):
310 warned = [0]
310 warned = [0]
311 def validate(node):
311 def validate(node):
312 try:
312 try:
313 self.changelog.rev(node)
313 self.changelog.rev(node)
314 return node
314 return node
315 except error.LookupError:
315 except error.LookupError:
316 if not warned[0]:
316 if not warned[0]:
317 warned[0] = True
317 warned[0] = True
318 self.ui.warn(_("warning: ignoring unknown"
318 self.ui.warn(_("warning: ignoring unknown"
319 " working parent %s!\n") % short(node))
319 " working parent %s!\n") % short(node))
320 return nullid
320 return nullid
321
321
322 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
322 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
323
323
324 def __getitem__(self, changeid):
324 def __getitem__(self, changeid):
325 if changeid is None:
325 if changeid is None:
326 return context.workingctx(self)
326 return context.workingctx(self)
327 return context.changectx(self, changeid)
327 return context.changectx(self, changeid)
328
328
329 def __contains__(self, changeid):
329 def __contains__(self, changeid):
330 try:
330 try:
331 return bool(self.lookup(changeid))
331 return bool(self.lookup(changeid))
332 except error.RepoLookupError:
332 except error.RepoLookupError:
333 return False
333 return False
334
334
335 def __nonzero__(self):
335 def __nonzero__(self):
336 return True
336 return True
337
337
338 def __len__(self):
338 def __len__(self):
339 return len(self.changelog)
339 return len(self.changelog)
340
340
341 def __iter__(self):
341 def __iter__(self):
342 for i in xrange(len(self)):
342 for i in xrange(len(self)):
343 yield i
343 yield i
344
344
345 def revs(self, expr, *args):
345 def revs(self, expr, *args):
346 '''Return a list of revisions matching the given revset'''
346 '''Return a list of revisions matching the given revset'''
347 expr = revset.formatspec(expr, *args)
347 expr = revset.formatspec(expr, *args)
348 m = revset.match(None, expr)
348 m = revset.match(None, expr)
349 return [r for r in m(self, range(len(self)))]
349 return [r for r in m(self, range(len(self)))]
350
350
351 def set(self, expr, *args):
351 def set(self, expr, *args):
352 '''
352 '''
353 Yield a context for each matching revision, after doing arg
353 Yield a context for each matching revision, after doing arg
354 replacement via revset.formatspec
354 replacement via revset.formatspec
355 '''
355 '''
356 for r in self.revs(expr, *args):
356 for r in self.revs(expr, *args):
357 yield self[r]
357 yield self[r]
358
358
359 def url(self):
359 def url(self):
360 return 'file:' + self.root
360 return 'file:' + self.root
361
361
362 def hook(self, name, throw=False, **args):
362 def hook(self, name, throw=False, **args):
363 return hook.hook(self.ui, self, name, throw, **args)
363 return hook.hook(self.ui, self, name, throw, **args)
364
364
365 tag_disallowed = ':\r\n'
365 tag_disallowed = ':\r\n'
366
366
367 def _tag(self, names, node, message, local, user, date, extra={}):
367 def _tag(self, names, node, message, local, user, date, extra={}):
368 if isinstance(names, str):
368 if isinstance(names, str):
369 allchars = names
369 allchars = names
370 names = (names,)
370 names = (names,)
371 else:
371 else:
372 allchars = ''.join(names)
372 allchars = ''.join(names)
373 for c in self.tag_disallowed:
373 for c in self.tag_disallowed:
374 if c in allchars:
374 if c in allchars:
375 raise util.Abort(_('%r cannot be used in a tag name') % c)
375 raise util.Abort(_('%r cannot be used in a tag name') % c)
376
376
377 branches = self.branchmap()
377 branches = self.branchmap()
378 for name in names:
378 for name in names:
379 self.hook('pretag', throw=True, node=hex(node), tag=name,
379 self.hook('pretag', throw=True, node=hex(node), tag=name,
380 local=local)
380 local=local)
381 if name in branches:
381 if name in branches:
382 self.ui.warn(_("warning: tag %s conflicts with existing"
382 self.ui.warn(_("warning: tag %s conflicts with existing"
383 " branch name\n") % name)
383 " branch name\n") % name)
384
384
385 def writetags(fp, names, munge, prevtags):
385 def writetags(fp, names, munge, prevtags):
386 fp.seek(0, 2)
386 fp.seek(0, 2)
387 if prevtags and prevtags[-1] != '\n':
387 if prevtags and prevtags[-1] != '\n':
388 fp.write('\n')
388 fp.write('\n')
389 for name in names:
389 for name in names:
390 m = munge and munge(name) or name
390 m = munge and munge(name) or name
391 if (self._tagscache.tagtypes and
391 if (self._tagscache.tagtypes and
392 name in self._tagscache.tagtypes):
392 name in self._tagscache.tagtypes):
393 old = self.tags().get(name, nullid)
393 old = self.tags().get(name, nullid)
394 fp.write('%s %s\n' % (hex(old), m))
394 fp.write('%s %s\n' % (hex(old), m))
395 fp.write('%s %s\n' % (hex(node), m))
395 fp.write('%s %s\n' % (hex(node), m))
396 fp.close()
396 fp.close()
397
397
398 prevtags = ''
398 prevtags = ''
399 if local:
399 if local:
400 try:
400 try:
401 fp = self.opener('localtags', 'r+')
401 fp = self.opener('localtags', 'r+')
402 except IOError:
402 except IOError:
403 fp = self.opener('localtags', 'a')
403 fp = self.opener('localtags', 'a')
404 else:
404 else:
405 prevtags = fp.read()
405 prevtags = fp.read()
406
406
407 # local tags are stored in the current charset
407 # local tags are stored in the current charset
408 writetags(fp, names, None, prevtags)
408 writetags(fp, names, None, prevtags)
409 for name in names:
409 for name in names:
410 self.hook('tag', node=hex(node), tag=name, local=local)
410 self.hook('tag', node=hex(node), tag=name, local=local)
411 return
411 return
412
412
413 try:
413 try:
414 fp = self.wfile('.hgtags', 'rb+')
414 fp = self.wfile('.hgtags', 'rb+')
415 except IOError, e:
415 except IOError, e:
416 if e.errno != errno.ENOENT:
416 if e.errno != errno.ENOENT:
417 raise
417 raise
418 fp = self.wfile('.hgtags', 'ab')
418 fp = self.wfile('.hgtags', 'ab')
419 else:
419 else:
420 prevtags = fp.read()
420 prevtags = fp.read()
421
421
422 # committed tags are stored in UTF-8
422 # committed tags are stored in UTF-8
423 writetags(fp, names, encoding.fromlocal, prevtags)
423 writetags(fp, names, encoding.fromlocal, prevtags)
424
424
425 fp.close()
425 fp.close()
426
426
427 self.invalidatecaches()
427 self.invalidatecaches()
428
428
429 if '.hgtags' not in self.dirstate:
429 if '.hgtags' not in self.dirstate:
430 self[None].add(['.hgtags'])
430 self[None].add(['.hgtags'])
431
431
432 m = matchmod.exact(self.root, '', ['.hgtags'])
432 m = matchmod.exact(self.root, '', ['.hgtags'])
433 tagnode = self.commit(message, user, date, extra=extra, match=m)
433 tagnode = self.commit(message, user, date, extra=extra, match=m)
434
434
435 for name in names:
435 for name in names:
436 self.hook('tag', node=hex(node), tag=name, local=local)
436 self.hook('tag', node=hex(node), tag=name, local=local)
437
437
438 return tagnode
438 return tagnode
439
439
440 def tag(self, names, node, message, local, user, date):
440 def tag(self, names, node, message, local, user, date):
441 '''tag a revision with one or more symbolic names.
441 '''tag a revision with one or more symbolic names.
442
442
443 names is a list of strings or, when adding a single tag, names may be a
443 names is a list of strings or, when adding a single tag, names may be a
444 string.
444 string.
445
445
446 if local is True, the tags are stored in a per-repository file.
446 if local is True, the tags are stored in a per-repository file.
447 otherwise, they are stored in the .hgtags file, and a new
447 otherwise, they are stored in the .hgtags file, and a new
448 changeset is committed with the change.
448 changeset is committed with the change.
449
449
450 keyword arguments:
450 keyword arguments:
451
451
452 local: whether to store tags in non-version-controlled file
452 local: whether to store tags in non-version-controlled file
453 (default False)
453 (default False)
454
454
455 message: commit message to use if committing
455 message: commit message to use if committing
456
456
457 user: name of user to use if committing
457 user: name of user to use if committing
458
458
459 date: date tuple to use if committing'''
459 date: date tuple to use if committing'''
460
460
461 if not local:
461 if not local:
462 for x in self.status()[:5]:
462 for x in self.status()[:5]:
463 if '.hgtags' in x:
463 if '.hgtags' in x:
464 raise util.Abort(_('working copy of .hgtags is changed '
464 raise util.Abort(_('working copy of .hgtags is changed '
465 '(please commit .hgtags manually)'))
465 '(please commit .hgtags manually)'))
466
466
467 self.tags() # instantiate the cache
467 self.tags() # instantiate the cache
468 self._tag(names, node, message, local, user, date)
468 self._tag(names, node, message, local, user, date)
469
469
470 @propertycache
470 @propertycache
471 def _tagscache(self):
471 def _tagscache(self):
472 '''Returns a tagscache object that contains various tags related
472 '''Returns a tagscache object that contains various tags related
473 caches.'''
473 caches.'''
474
474
475 # This simplifies its cache management by having one decorated
475 # This simplifies its cache management by having one decorated
476 # function (this one) and the rest simply fetch things from it.
476 # function (this one) and the rest simply fetch things from it.
477 class tagscache(object):
477 class tagscache(object):
478 def __init__(self):
478 def __init__(self):
479 # These two define the set of tags for this repository. tags
479 # These two define the set of tags for this repository. tags
480 # maps tag name to node; tagtypes maps tag name to 'global' or
480 # maps tag name to node; tagtypes maps tag name to 'global' or
481 # 'local'. (Global tags are defined by .hgtags across all
481 # 'local'. (Global tags are defined by .hgtags across all
482 # heads, and local tags are defined in .hg/localtags.)
482 # heads, and local tags are defined in .hg/localtags.)
483 # They constitute the in-memory cache of tags.
483 # They constitute the in-memory cache of tags.
484 self.tags = self.tagtypes = None
484 self.tags = self.tagtypes = None
485
485
486 self.nodetagscache = self.tagslist = None
486 self.nodetagscache = self.tagslist = None
487
487
488 cache = tagscache()
488 cache = tagscache()
489 cache.tags, cache.tagtypes = self._findtags()
489 cache.tags, cache.tagtypes = self._findtags()
490
490
491 return cache
491 return cache
492
492
493 def tags(self):
493 def tags(self):
494 '''return a mapping of tag to node'''
494 '''return a mapping of tag to node'''
495 t = {}
495 t = {}
496 for k, v in self._tagscache.tags.iteritems():
496 for k, v in self._tagscache.tags.iteritems():
497 try:
497 try:
498 # ignore tags to unknown nodes
498 # ignore tags to unknown nodes
499 self.changelog.rev(v)
499 self.changelog.rev(v)
500 t[k] = v
500 t[k] = v
501 except (error.LookupError, ValueError):
501 except (error.LookupError, ValueError):
502 pass
502 pass
503 return t
503 return t
504
504
505 def _findtags(self):
505 def _findtags(self):
506 '''Do the hard work of finding tags. Return a pair of dicts
506 '''Do the hard work of finding tags. Return a pair of dicts
507 (tags, tagtypes) where tags maps tag name to node, and tagtypes
507 (tags, tagtypes) where tags maps tag name to node, and tagtypes
508 maps tag name to a string like \'global\' or \'local\'.
508 maps tag name to a string like \'global\' or \'local\'.
509 Subclasses or extensions are free to add their own tags, but
509 Subclasses or extensions are free to add their own tags, but
510 should be aware that the returned dicts will be retained for the
510 should be aware that the returned dicts will be retained for the
511 duration of the localrepo object.'''
511 duration of the localrepo object.'''
512
512
513 # XXX what tagtype should subclasses/extensions use? Currently
513 # XXX what tagtype should subclasses/extensions use? Currently
514 # mq and bookmarks add tags, but do not set the tagtype at all.
514 # mq and bookmarks add tags, but do not set the tagtype at all.
515 # Should each extension invent its own tag type? Should there
515 # Should each extension invent its own tag type? Should there
516 # be one tagtype for all such "virtual" tags? Or is the status
516 # be one tagtype for all such "virtual" tags? Or is the status
517 # quo fine?
517 # quo fine?
518
518
519 alltags = {} # map tag name to (node, hist)
519 alltags = {} # map tag name to (node, hist)
520 tagtypes = {}
520 tagtypes = {}
521
521
522 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
522 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
523 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
523 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
524
524
525 # Build the return dicts. Have to re-encode tag names because
525 # Build the return dicts. Have to re-encode tag names because
526 # the tags module always uses UTF-8 (in order not to lose info
526 # the tags module always uses UTF-8 (in order not to lose info
527 # writing to the cache), but the rest of Mercurial wants them in
527 # writing to the cache), but the rest of Mercurial wants them in
528 # local encoding.
528 # local encoding.
529 tags = {}
529 tags = {}
530 for (name, (node, hist)) in alltags.iteritems():
530 for (name, (node, hist)) in alltags.iteritems():
531 if node != nullid:
531 if node != nullid:
532 tags[encoding.tolocal(name)] = node
532 tags[encoding.tolocal(name)] = node
533 tags['tip'] = self.changelog.tip()
533 tags['tip'] = self.changelog.tip()
534 tagtypes = dict([(encoding.tolocal(name), value)
534 tagtypes = dict([(encoding.tolocal(name), value)
535 for (name, value) in tagtypes.iteritems()])
535 for (name, value) in tagtypes.iteritems()])
536 return (tags, tagtypes)
536 return (tags, tagtypes)
537
537
538 def tagtype(self, tagname):
538 def tagtype(self, tagname):
539 '''
539 '''
540 return the type of the given tag. result can be:
540 return the type of the given tag. result can be:
541
541
542 'local' : a local tag
542 'local' : a local tag
543 'global' : a global tag
543 'global' : a global tag
544 None : tag does not exist
544 None : tag does not exist
545 '''
545 '''
546
546
547 return self._tagscache.tagtypes.get(tagname)
547 return self._tagscache.tagtypes.get(tagname)
548
548
549 def tagslist(self):
549 def tagslist(self):
550 '''return a list of tags ordered by revision'''
550 '''return a list of tags ordered by revision'''
551 if not self._tagscache.tagslist:
551 if not self._tagscache.tagslist:
552 l = []
552 l = []
553 for t, n in self.tags().iteritems():
553 for t, n in self.tags().iteritems():
554 r = self.changelog.rev(n)
554 r = self.changelog.rev(n)
555 l.append((r, t, n))
555 l.append((r, t, n))
556 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
556 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
557
557
558 return self._tagscache.tagslist
558 return self._tagscache.tagslist
559
559
560 def nodetags(self, node):
560 def nodetags(self, node):
561 '''return the tags associated with a node'''
561 '''return the tags associated with a node'''
562 if not self._tagscache.nodetagscache:
562 if not self._tagscache.nodetagscache:
563 nodetagscache = {}
563 nodetagscache = {}
564 for t, n in self._tagscache.tags.iteritems():
564 for t, n in self._tagscache.tags.iteritems():
565 nodetagscache.setdefault(n, []).append(t)
565 nodetagscache.setdefault(n, []).append(t)
566 for tags in nodetagscache.itervalues():
566 for tags in nodetagscache.itervalues():
567 tags.sort()
567 tags.sort()
568 self._tagscache.nodetagscache = nodetagscache
568 self._tagscache.nodetagscache = nodetagscache
569 return self._tagscache.nodetagscache.get(node, [])
569 return self._tagscache.nodetagscache.get(node, [])
570
570
571 def nodebookmarks(self, node):
571 def nodebookmarks(self, node):
572 marks = []
572 marks = []
573 for bookmark, n in self._bookmarks.iteritems():
573 for bookmark, n in self._bookmarks.iteritems():
574 if n == node:
574 if n == node:
575 marks.append(bookmark)
575 marks.append(bookmark)
576 return sorted(marks)
576 return sorted(marks)
577
577
578 def _branchtags(self, partial, lrev):
578 def _branchtags(self, partial, lrev):
579 # TODO: rename this function?
579 # TODO: rename this function?
580 tiprev = len(self) - 1
580 tiprev = len(self) - 1
581 if lrev != tiprev:
581 if lrev != tiprev:
582 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
582 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
583 self._updatebranchcache(partial, ctxgen)
583 self._updatebranchcache(partial, ctxgen)
584 self._writebranchcache(partial, self.changelog.tip(), tiprev)
584 self._writebranchcache(partial, self.changelog.tip(), tiprev)
585
585
586 return partial
586 return partial
587
587
588 def updatebranchcache(self):
588 def updatebranchcache(self):
589 tip = self.changelog.tip()
589 tip = self.changelog.tip()
590 if self._branchcache is not None and self._branchcachetip == tip:
590 if self._branchcache is not None and self._branchcachetip == tip:
591 return
591 return
592
592
593 oldtip = self._branchcachetip
593 oldtip = self._branchcachetip
594 self._branchcachetip = tip
594 self._branchcachetip = tip
595 if oldtip is None or oldtip not in self.changelog.nodemap:
595 if oldtip is None or oldtip not in self.changelog.nodemap:
596 partial, last, lrev = self._readbranchcache()
596 partial, last, lrev = self._readbranchcache()
597 else:
597 else:
598 lrev = self.changelog.rev(oldtip)
598 lrev = self.changelog.rev(oldtip)
599 partial = self._branchcache
599 partial = self._branchcache
600
600
601 self._branchtags(partial, lrev)
601 self._branchtags(partial, lrev)
602 # this private cache holds all heads (not just the branch tips)
602 # this private cache holds all heads (not just the branch tips)
603 self._branchcache = partial
603 self._branchcache = partial
604
604
605 def branchmap(self):
605 def branchmap(self):
606 '''returns a dictionary {branch: [branchheads]}'''
606 '''returns a dictionary {branch: [branchheads]}'''
607 self.updatebranchcache()
607 self.updatebranchcache()
608 return self._branchcache
608 return self._branchcache
609
609
    def _branchtip(self, heads):
        '''return the tipmost branch head in heads, preferring open heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, with open heads preferred over closed ones'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

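    # Illustrative usage (a hedged sketch): branchtags() collapses each
    # branch's head list down to a single node, e.g.
    #
    #   for name, node in sorted(repo.branchtags().iteritems()):
    #       repo.ui.write('%s: %s\n' % (name, short(node)))
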
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because '
                                     'node %s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

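    # For reference, a sketch of the cache/branchheads format as written
    # above (node values abbreviated here for readability; the real file
    # stores full 40-character hex hashes):
    #
    #   <tip-hex> <tip-rev>
    #   <head-hex> default
    #   <head-hex> stable
    #
    # The first line records the tip the cache was valid for; each
    # following line maps one head node to its branch label.
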
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that is at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        # (iterate over a copy of the keys, since branches may be deleted)
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

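    # Illustrative usage (a hedged sketch): lookupbranch() answers "is this
    # key a branch name, and if not, which branch is the revision on?", e.g.
    #
    #   repo.lookupbranch('default')   # -> 'default' (an existing branch)
    #   repo.lookupbranch('tip')       # -> branch name of the tip revision
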
    def known(self, nodes):
        # report which of the given nodes are present locally and visible
        # (i.e. not in the secret phase)
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

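    # Illustrative usage (a hedged sketch): known() is a workhorse of the
    # discovery protocol; it maps raw nodes to booleans, e.g.
    #
    #   repo.known([repo.changelog.tip()])   # -> [True] unless tip is secret
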
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

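    # Illustrative sketch (hypothetical names, not from the original source):
    # an extension might register a data filter and route files through it
    # via an [encode] entry in hgrc, which _loadfilter() above picks up:
    #
    #   def crlffilter(s, cmd, **kwargs):
    #       return s.replace('\r\n', '\n')
    #   repo.adddatafilter('crlffilter:', crlffilter)
    #
    #   [encode]
    #   **.txt = crlffilter:
    #
    # Encode filters run in wread() below; decode filters run in wwrite().
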
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

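    # Illustrative usage (a hedged sketch): callers are expected to close a
    # transaction on success and always release it, mirroring what
    # commitctx() does further down:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...                # write to the store through tr
    #       tr.close()         # commit the journal
    #   finally:
    #       tr.release()       # rolls back if close() was never reached
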
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

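    # Illustrative usage (a hedged sketch): work that must not run while the
    # lock is held can be deferred this way, as commit() does below with its
    # commithook callback:
    #
    #   def notify():
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(notify)   # runs immediately if no lock is held
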
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

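    # Illustrative usage (a hedged sketch): the lock ordering used throughout
    # this module is wlock before lock, released in reverse order, as
    # rollback() does above:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...                    # mutate dirstate and store
    #   finally:
    #       release(lock, wlock)
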
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                    should record that bar descends from
            #                    bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

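    # For reference (derived from the code above, values hypothetical): when
    # a rename is detected, the new filelog revision carries copy metadata of
    # the form
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-char hex of the filenode>'}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
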
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

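    # Illustrative usage (a hedged sketch): a plain working-directory commit,
    # and the None return for a no-op:
    #
    #   node = repo.commit(text='fix the widget', user='me <me@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
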
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheads cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

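    # Usage sketch (illustrative, not part of the original source): a
    # strip-style caller that has already computed the candidate new heads
    # can pass them in so the branchheads cache is patched in place rather
    # than rebuilt; 'candidates' is a hypothetical precomputed set of nodes.
    #
    #   repo.destroyed(newheadnodes=candidates)
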
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

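    # Usage sketch (illustrative): list working-directory files matching a
    # glob, building the matcher with matchmod (mercurial.match, imported at
    # the top of this module); the pattern is an assumption for the example.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write("%s\n" % f)
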
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

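    # Usage sketch (illustrative): compare the working directory against its
    # first parent and unpack the seven lists returned above.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
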
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

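    # Usage sketch (illustrative): newest-first open heads of the 'default'
    # branch; short() comes from the node module imported by this file.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write("%s\n" % short(h))
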
    def branches(self, nodes):
        # for each node, walk first parents back until a merge or the root
        # and report the 4-tuple (node, branchpoint, p1, p2); used by the
        # legacy 'branches' wire protocol command
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, collect nodes on the first-parent
        # chain from top towards bottom at exponentially growing distances
        # (1, 2, 4, ...); used by the legacy 'between' wire protocol command
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

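    # Usage sketch (illustrative): sample the first-parent chain between two
    # known nodes; the result holds one list of nodes per requested pair.
    #
    #   samples = repo.between([(topnode, bottomnode)])[0]
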
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

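    # Usage sketch (illustrative): pull everything from another repository;
    # 'other' is assumed to be a peer/repository object obtained elsewhere,
    # e.g. via hg.repository(ui, path).
    #
    #   result = repo.pull(other, heads=None, force=False)
    #   # result is addchangegroup()'s return value, or 0 if nothing came in
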
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

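    # Extension sketch (illustrative): an override could veto pushes here;
    # 'myext.blockpush' is a hypothetical configuration knob.
    #
    #   def checkpush(self, force, revs):
    #       if not force and self.ui.configbool('myext', 'blockpush'):
    #           raise util.Abort(_('pushing is disabled by myext'))
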
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty there is nothing
                        # obsolete, so we can skip the iteration entirely
                        if self.obsstore:
                            # these messages are defined here to respect the
                            # 80-char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the push of everything failed; synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if ('obsolete' in remote.listkeys('namespaces')
                    and self.obsstore):
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

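    # Usage sketch (illustrative): push everything and interpret the return
    # value as documented in the docstring above.
    #
    #   ret = repo.push(other)
    #   if ret is None:
    #       repo.ui.status("nothing to push\n")
    #   elif ret == 0:
    #       repo.ui.warn("push failed (HTTP error)\n")
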
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

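    # Usage sketch (illustrative): bundle everything that descends from a
    # base and is an ancestor of a head; read() yields raw changegroup bytes.
    #
    #   cg = repo.changegroupsubset([basenode], [headnode], 'bundle')
    #   chunk = cg.read(4096)
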
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

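    # Usage sketch (illustrative): given the defaults above, these two calls
    # produce the same full bundle of the repository.
    #
    #   cg = repo.getbundle('bundle')
    #   cg = repo.getbundle('bundle', heads=repo.heads(), common=[nullid])
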
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
2418 return dh - 1
2419 else:
2419 else:
2420 return dh + 1
2420 return dh + 1
2421
2421
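
The return value above encodes the outcome for callers: zero is reserved
for "no changesets added", so the head delta dh is shifted away from it
(dh + 1 normally, dh - 1 when the pull actually reduced the head count).
A standalone sketch of that bookkeeping over plain sets (illustrative
only, not part of the diff; all names here are hypothetical):

# Toy mirror of the head-delta bookkeeping in addchangegroup() above.
def headdelta_return(oldheads, newheads, closed):
    dh = 0
    if oldheads:
        dh = len(newheads) - len(oldheads)
        # a new head that closes a branch does not count against the user
        for h in newheads:
            if h not in oldheads and h in closed:
                dh -= 1
    # never return 0: callers can then tell "nothing added" (0) apart
    # from "changes added, head count unchanged" (1)
    return dh - 1 if dh < 0 else dh + 1

# pulling a merge that fuses two old heads into one reports -2:
assert headdelta_return({'a', 'b'}, {'m'}, set()) == -2
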
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
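
For reference, the stream format consumed above is line-oriented: one
numeric status line, one "filecount bytecount" line, then for each file a
"name\0size" header followed by exactly size raw bytes. A minimal
standalone parser for that framing (a sketch against an in-memory buffer,
not the real wire-protocol plumbing):

import io

def parsestream(fp):
    # status line: 0 ok, 1 operation forbidden, 2 remote locking failed
    if int(fp.readline()) != 0:
        raise ValueError('server refused stream')
    total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
    files = {}
    for _ in range(total_files):
        # one header line per file; '\n' or '\r' in names is unsupported
        name, size = fp.readline().split(b'\0', 1)
        files[name.decode()] = fp.read(int(size))
    return files

demo = b'0\n1 5\ndata/a.i\x005\nhello'
assert parsestream(io.BytesIO(demo)) == {'data/a.i': b'hello'}
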
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
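
The negotiation above reduces to three capability checks. A condensed
sketch of the same decision tree, assuming only a capable(name) lookup
that returns the capability value or a false value (the function and
variable names are hypothetical):

def clonemode(capable, heads, stream, supportedformats):
    # mirrors clone() above: pick streaming or a normal pull
    if not stream:
        stream = bool(capable('stream-preferred'))
    if stream and not heads:
        if capable('stream'):               # plain revlogv1 remote
            return 'stream', set(['revlogv1'])
        streamreqs = capable('streamreqs')  # e.g. "revlogv1,generaldelta"
        if streamreqs:
            reqs = set(streamreqs.split(','))
            if not reqs - supportedformats: # we understand every format
                return 'stream', reqs
    return 'pull', None

caps = {'streamreqs': 'revlogv1,generaldelta'}
mode, reqs = clonemode(caps.get, [], True, set(['revlogv1', 'generaldelta']))
assert mode == 'stream' and reqs == set(['revlogv1', 'generaldelta'])
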
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
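
aftertrans returns a closure over a snapshot of (src, dest) pairs so the
transaction can run it after closing without holding a reference back to
the repository, and undoname maps each journal file to the undo name the
rename targets; together they are what makes 'hg rollback' possible. A
runnable sketch of that flow, substituting os.rename for util.rename:

import os, tempfile

def aftertrans(files):                      # same shape as the code above
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                os.rename(src, dest)
            except OSError:                 # journal file does not yet exist
                pass
    return a

d = tempfile.mkdtemp()
src, dest = os.path.join(d, 'journal'), os.path.join(d, 'undo')
open(src, 'w').close()
aftertrans([(src, dest)])()                 # what a transaction close triggers
assert os.path.exists(dest) and not os.path.exists(src)
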
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,200 +1,199
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid
from i18n import _
import random, util, dagutil
-import phases

def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            if curr not in always: # need this check for the early exit below
                sample.add(curr)
                if quicksamplesize and (len(sample) >= quicksamplesize):
                    return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
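
The walk above keeps a node only when its BFS depth from the heads equals
the current power of two, so samples stay dense near the heads and thin
out exponentially toward the roots. A toy run on a 16-node linear chain,
with a plain dict standing in for the revlog dag (and ignoring the
'always' set and node-set restriction of the real code):

from collections import deque

def exponentialsample(heads, parents):
    sample, seen, dist = set(), set(), {}
    visit = deque(heads)
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:            # depth hit the current power of two
            sample.add(curr)
        seen.add(curr)
        for p in parents.get(curr, ()):
            dist.setdefault(p, d + 1)
            visit.append(p)
    return sample

# linear chain 15 <- 14 <- ... <- 0: depths from the head are 1..16, so
# the sampled depths are the powers of two 1, 2, 4, 8, 16, i.e. revs
# 15, 14, 12, 8 and 0
parents = dict((n, [n - 1]) for n in range(1, 16))
assert exponentialsample([15], parents) == set([15, 14, 12, 8, 0])
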
def _setupsample(dag, nodes, size):
    if len(nodes) <= size:
        return set(nodes), None, 0
    always = dag.headsetofconnecteds(nodes)
    desiredlen = size - len(always)
    if desiredlen <= 0:
        # This could be bad if there are very many heads, all unknown to the
        # server. We're counting on long request support here.
        return always, None, desiredlen
    return always, set(), desiredlen

def _takequicksample(dag, nodes, size, initial):
    always, sample, desiredlen = _setupsample(dag, nodes, size)
    if sample is None:
        return always
    if initial:
        fromset = None
    else:
        fromset = nodes
    _updatesample(dag, fromset, sample, always, quicksamplesize=desiredlen)
    sample.update(always)
    return sample

def _takefullsample(dag, nodes, size):
    always, sample, desiredlen = _setupsample(dag, nodes, size)
    if sample is None:
        return always
    # update from heads
    _updatesample(dag, nodes, sample, always)
    # update from roots
    _updatesample(dag.inverse(), nodes, sample, always)
    assert sample
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    elif len(sample) < desiredlen:
        more = desiredlen - len(sample)
        sample.update(random.sample(list(nodes - sample - always), more))
    sample.update(always)
    return sample

def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.

    shortcutlocal determines whether we try to use direct access to localrepo
    if remote is actually local.
    '''
    roundtrips = 0
    cl = local.changelog
    dag = dagutil.revlogdag(cl)

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    ownheads = dag.heads()
    sample = ownheads
    if remote.local():
        # stopgap until we have a proper localpeer that supports batch()
-        srvheadhashes = phases.visibleheads(remote.local())
+        srvheadhashes = remote.heads()
        yesno = remote.known(dag.externalizeall(sample))
    elif remote.capable('batch'):
        batch = remote.batch()
        srvheadhashesref = batch.heads()
        yesnoref = batch.known(dag.externalizeall(sample))
        batch.submit()
        srvheadhashes = srvheadhashesref.value
        yesno = yesnoref.value
    else:
        # compatibility with pre-batch, but post-known remotes during 1.9 devel
        srvheadhashes = remote.heads()
        sample = []

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return (srvheadhashes, False, srvheadhashes,)

    if sample and util.all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = dag.externalizeall(ownheads)
        return (ownheadhashes, True, srvheadhashes,)

    # full blown discovery

    # own nodes where I don't know if remote knows them
    undecided = dag.nodeset()
    # own nodes I know we both know
    common = set()
    # own nodes I know remote lacks
    missing = set()

    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    common.update(dag.ancestorset(srvheads))
    undecided.difference_update(common)

    full = False
    while undecided:

        if sample:
            commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
            common.update(dag.ancestorset(commoninsample, common))

            missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
            missing.update(dag.descendantset(missinginsample, missing))

            undecided.difference_update(missing)
            undecided.difference_update(common)

        if not undecided:
            break

        if full:
            ui.note(_("sampling from both directions\n"))
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        elif common:
            # use cheapish initial sample
            ui.debug("taking initial sample\n")
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            sample = _takequicksample(dag, undecided, size=initialsamplesize,
                                      initial=True)

        roundtrips += 1
        ui.progress(_('searching'), roundtrips, unit=_('queries'))
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)
        yesno = remote.known(dag.externalizeall(sample))
        full = True

    result = dag.headsetofconnecteds(common)
    ui.progress(_('searching'), None)
    ui.debug("%d total queries\n" % roundtrips)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise util.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return (set([nullid]), True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    return dag.externalizeall(result), anyincoming, srvheadhashes
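
Each round of the loop above removes the ancestor closure of every "yes"
answer and the descendant closure of every "no" answer from undecided,
which is why discovery converges in a handful of known round trips even
on large repositories. A toy end-to-end run on linear integer histories,
with set lookups standing in for the wire protocol and the dag helpers
(all names hypothetical):

import random

local = set(range(100))                    # revs 0..49 are shared
remotehas = set(range(50)) | set(range(100, 150))

undecided, common, missing = set(local), set(), set()
sample, rounds = set([99]), 0              # query 1: our own head
while undecided:
    rounds += 1
    yes = set(n for n in sample if n in remotehas)
    for n in yes:
        common.update(range(n + 1))        # ancestorset on a linear dag
    for n in sample - yes:
        missing.update(range(n, 100))      # descendantset on a linear dag
    undecided = local - common - missing
    if undecided:
        k = min(5, len(undecided))
        sample = set(random.sample(sorted(undecided), k))

assert max(common) == 49                   # the common head was found
print('%d total queries' % rounds)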