branchmap: extract _updatebranchcache from repo
Pierre-Yves David
r18120:88990d3e default
@@ -1,52 +1,113 @@
# branchmap.py - logic to compute, maintain and store branchmap for local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev
import encoding

def read(repo):
    partial = {}
    try:
        f = repo.opener("cache/branchheads")
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return {}, nullid, nullrev

    try:
        last, lrev = lines.pop(0).split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if lrev >= len(repo) or repo[lrev].node() != last:
            # invalidate the cache
            raise ValueError('invalidating branch cache (tip differs)')
        for l in lines:
            if not l:
                continue
            node, label = l.split(" ", 1)
            label = encoding.tolocal(label.strip())
            if not node in repo:
                raise ValueError('invalidating branch cache because node '+
                                 '%s does not exist' % node)
            partial.setdefault(label, []).append(bin(node))
    except KeyboardInterrupt:
        raise
    except Exception, inst:
        if repo.ui.debugflag:
            repo.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev

def write(repo, branches, tip, tiprev):
    try:
        f = repo.opener("cache/branchheads", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, nodes in branches.iteritems():
            for node in nodes:
                f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
        f.close()
    except (IOError, OSError):
        pass
+
+def update(repo, partial, ctxgen):
+    """Given a branchhead cache, partial, that may have extra nodes or be
+    missing heads, and a generator of nodes that are at least a superset of
+    the missing heads, this function updates partial to be correct.
+    """
+    # collect new branch entries
+    newbranches = {}
+    for c in ctxgen:
+        newbranches.setdefault(c.branch(), []).append(c.node())
+    # if older branchheads are reachable from new ones, they aren't
+    # really branchheads. Note checking parents is insufficient:
+    # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
+    for branch, newnodes in newbranches.iteritems():
+        bheads = partial.setdefault(branch, [])
+        # Remove candidate heads that no longer are in the repo (e.g., as
+        # the result of a strip that just happened). Avoid using 'node in
+        # self' here because that dives down into branchcache code somewhat
+        # recursively.
+        bheadrevs = [repo.changelog.rev(node) for node in bheads
+                     if repo.changelog.hasnode(node)]
+        newheadrevs = [repo.changelog.rev(node) for node in newnodes
+                       if repo.changelog.hasnode(node)]
+        ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
+        # Remove duplicates - nodes that are in newheadrevs and are already
+        # in bheadrevs. This can happen if you strip a node whose parent
+        # was already a head (because they're on different branches).
+        bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
+
+        # Starting from tip means fewer passes over reachable. If we know
+        # the new candidates are not ancestors of existing heads, we don't
+        # have to examine ancestors of existing heads
+        if ctxisnew:
+            iterrevs = sorted(newheadrevs)
+        else:
+            iterrevs = list(bheadrevs)
+
+        # This loop prunes out two kinds of heads - heads that are
+        # superseded by a head in newheadrevs, and newheadrevs that are not
+        # heads because an existing head is their descendant.
+        while iterrevs:
+            latest = iterrevs.pop()
+            if latest not in bheadrevs:
+                continue
+            ancestors = set(repo.changelog.ancestors([latest],
+                                                     bheadrevs[0]))
+            if ancestors:
+                bheadrevs = [b for b in bheadrevs if b not in ancestors]
+        partial[branch] = [repo.changelog.node(rev) for rev in bheadrevs]
+
+    # There may be branches that cease to exist when the last commit in the
+    # branch was stripped. This code filters them out. Note that the
+    # branch that ceased to exist may not be in newbranches because
+    # newbranches is the set of candidate heads, which when you strip the
+    # last commit in a branch will be the parent branch.
+    for branch in partial.keys():
+        nodes = [head for head in partial[branch]
+                 if repo.changelog.hasnode(head)]
+        if not nodes:
+            del partial[branch]
+
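A minimal sketch of how the three functions above fit together (the driver
loop here is hypothetical; localrepo wires this up for real). The cache file
itself is plain text: a first line of "<tip hex> <tip rev>", then one
"<node hex> <branch label>" line per head. Assuming repo is an open
localrepository:

    import branchmap

    # load the cached heads; read() falls back to an empty cache on any error
    partial, last, lrev = branchmap.read(repo)
    tiprev = len(repo) - 1
    if lrev != tiprev:
        # feed every changeset added since the cached tip into update()
        ctxgen = (repo[r] for r in xrange(lrev + 1, len(repo)))
        branchmap.update(repo, partial, ctxgen)
        branchmap.write(repo, partial, repo[tiprev].node(), tiprev)
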
@@ -1,346 +1,347 @@
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, short
from i18n import _
import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
+import branchmap

def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        allknown = True
        nm = repo.changelog.nodemap
        for h in heads:
            if nm.get(h) is None:
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = revlog
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

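To make the lazy members concrete, a hedged sketch of driving this class
through findcommonoutgoing (defined next); other stands for any peer object:

    og = findcommonoutgoing(repo, other)
    # og.missing and og.common are computed on first access (propertycache)
    repo.ui.status("%d changesets to push, %d missing heads\n"
                   % (len(og.missing), len(og.missingheads)))
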
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = visibleheads(repo)
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        for h in heads:
            if h in cl.nodemap:
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
                  if heads[0] is not None)
-    repo._updatebranchcache(newmap, missingctx)
+    branchmap.update(repo, newmap, missingctx)
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    return headssum

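A hedged sketch of consuming the returned mapping; the new-branch case
(remoteheads is None) must be special-cased, as checkheads does below:

    headssum = _headssummary(repo, remote, outgoing)
    for branch, (remoteheads, newheads, unsyncedheads) in headssum.iteritems():
        if remoteheads is None:
            repo.ui.note("branch %s is new on the remote\n" % branch)
        elif len(newheads) > len(remoteheads):
            repo.ui.note("push would add a head on branch %s\n" % branch)
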
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    cl = repo.changelog
    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
-    # (code based on _updatebranchcache)
+    # (code based on update)
    oldheads = set(h for h in remoteheads if h in cl.nodemap)
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    unsynced = inc and set([None]) or set()
    return {None: (oldheads, newheads, unsynced)}

def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise util.Abort(_("push creates new remote branches: %s!")
                         % branchnames,
                         hint=_("use 'hg push --new-branch' to create"
                                " new remote branches"))

    # 2. Compute newly pushed bookmarks. We don't warn
    # about bookmarked heads.
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()
    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    unsynced = False
    allmissing = set(outgoing.missing)
    allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
    allfuturecommon.update(allmissing)
    for branch, heads in headssum.iteritems():
        if heads[0] is None:
            # Maybe we should abort if we push more than one head
            # for new branches?
            continue
        candidate_newhs = set(heads[1])
        # add unsynced data
        oldhs = set(heads[0])
        oldhs.update(heads[2])
        candidate_newhs.update(heads[2])
        dhs = None
        discardedheads = set()
        if repo.obsstore:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete markers
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changesets but
            # much more tricky for unsynced changes.
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        else:
            newhs = candidate_newhs
        if [h for h in heads[2] if h not in discardedheads]:
            unsynced = True
        if len(newhs) > len(oldhs):
            # strip updates to existing remote heads from the new heads list
            dhs = list(newhs - bookmarkedheads - oldhs)
        if dhs:
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                             ) % short(dhs[0])
                if heads[2]: # unsynced
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))

def visibleheads(repo):
    """return the set of visible heads of this repo"""
    return repo.filtered('unserved').heads()


def visiblebranchmap(repo):
    """return a branchmap for the visible set"""
    return repo.filtered('unserved').branchmap()
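For orientation, a hedged sketch of the order in which push code calls into
this module (simplified; revs, newbranch and force are assumed caller
options):

    commoninc = findcommonincoming(repo, remote, force=force)
    outgoing = findcommonoutgoing(repo, remote, onlyheads=revs,
                                  commoninc=commoninc, force=force)
    if not force:
        # abort before sending anything if new heads or branches would appear
        checkheads(repo, remote, outgoing, remote.heads(),
                   newbranch=newbranch, inc=bool(commoninc[1]))
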
@@ -1,2679 +1,2619 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

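A hedged sketch of the effect: values cached through these descriptors
always live on the unfiltered repository, so every filtered view shares
them (repo is any localrepository; _bookmarks is defined further down):

    view = repo.filtered('unserved')
    view._bookmarks                     # populates the shared cache slot
    assert hasunfilteredcache(repo, '_bookmarks')
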
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

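A hedged sketch of the proxy in use; 'unserved' is the filter name
discovery.visibleheads relies on:

    view = repo.filtered('unserved')
    assert view.unfiltered() is repo    # the view keeps a reference back
    visible = view.heads()              # heads of the filtered view only
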
324 @repofilecache('bookmarks')
324 @repofilecache('bookmarks')
325 def _bookmarks(self):
325 def _bookmarks(self):
326 return bookmarks.bmstore(self)
326 return bookmarks.bmstore(self)
327
327
328 @repofilecache('bookmarks.current')
328 @repofilecache('bookmarks.current')
329 def _bookmarkcurrent(self):
329 def _bookmarkcurrent(self):
330 return bookmarks.readcurrent(self)
330 return bookmarks.readcurrent(self)
331
331
332 def bookmarkheads(self, bookmark):
332 def bookmarkheads(self, bookmark):
333 name = bookmark.split('@', 1)[0]
333 name = bookmark.split('@', 1)[0]
334 heads = []
334 heads = []
335 for mark, n in self._bookmarks.iteritems():
335 for mark, n in self._bookmarks.iteritems():
336 if mark.split('@', 1)[0] == name:
336 if mark.split('@', 1)[0] == name:
337 heads.append(n)
337 heads.append(n)
338 return heads
338 return heads
339
339
340 @storecache('phaseroots')
340 @storecache('phaseroots')
341 def _phasecache(self):
341 def _phasecache(self):
342 return phases.phasecache(self, self._phasedefaults)
342 return phases.phasecache(self, self._phasedefaults)
343
343
344 @storecache('obsstore')
344 @storecache('obsstore')
345 def obsstore(self):
345 def obsstore(self):
346 store = obsolete.obsstore(self.sopener)
346 store = obsolete.obsstore(self.sopener)
347 if store and not obsolete._enabled:
347 if store and not obsolete._enabled:
348 # message is rare enough to not be translated
348 # message is rare enough to not be translated
349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 self.ui.warn(msg % len(list(store)))
350 self.ui.warn(msg % len(list(store)))
351 return store
351 return store
352
352
353 @unfilteredpropertycache
353 @unfilteredpropertycache
354 def hiddenrevs(self):
354 def hiddenrevs(self):
355 """hiddenrevs: revs that should be hidden by command and tools
355 """hiddenrevs: revs that should be hidden by command and tools
356
356
357 This set is carried on the repo to ease initialization and lazy
357 This set is carried on the repo to ease initialization and lazy
358 loading; it'll probably move back to changelog for efficiency and
358 loading; it'll probably move back to changelog for efficiency and
359 consistency reasons.
359 consistency reasons.
360
360
361 Note that the hiddenrevs will needs invalidations when
361 Note that the hiddenrevs will needs invalidations when
362 - a new changesets is added (possible unstable above extinct)
362 - a new changesets is added (possible unstable above extinct)
363 - a new obsolete marker is added (possible new extinct changeset)
363 - a new obsolete marker is added (possible new extinct changeset)
364
364
365 hidden changesets cannot have non-hidden descendants
365 hidden changesets cannot have non-hidden descendants
366 """
366 """
367 hidden = set()
367 hidden = set()
368 if self.obsstore:
368 if self.obsstore:
369 ### hide extinct changeset that are not accessible by any mean
369 ### hide extinct changeset that are not accessible by any mean
370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 hidden.update(self.revs(hiddenquery))
371 hidden.update(self.revs(hiddenquery))
372 return hidden
372 return hidden
373
373
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

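    # Sketch, assuming the checked-out changeset was just stripped:
    # validate() maps the now-unknown parent to nullid instead of raising,
    # so the repository stays usable:
    #
    #   repo.dirstate.p1()   # warns once, then returns nullid
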
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

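    # Sketch of typical callers, assuming `repo` is an open localrepository;
    # formatspec() quotes arguments, so user data can be passed safely:
    #
    #   heads = repo.revs('head() and branch(%s)', 'default')
    #   for ctx in repo.set('%ld and not closed()', heads):
    #       repo.ui.write('%s\n' % ctx.hex())
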
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

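    # Sketch, assuming a writable repo: tagging the working parent with a
    # global (non-local) tag commits a new changeset touching .hgtags:
    #
    #   node = repo['.'].node()
    #   repo.tag('v1.0', node, 'Added tag v1.0', local=False,
    #            user=None, date=None)
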
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

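    # Sketch: dumping the cached mapping, assuming the short() helper from
    # mercurial.node:
    #
    #   from mercurial.node import short
    #   for name, n in sorted(repo.tags().iteritems()):
    #       repo.ui.write('%s %s\n' % (name, short(n)))
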
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

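    # Sketch: every symbolic name attached to a node, combining both caches
    # (each list is already sorted by the methods above):
    #
    #   n = repo['tip'].node()
    #   names = repo.nodetags(n) + repo.nodebookmarks(n)
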
    def _cacheabletip(self):
        """tip-most revision stable enough to be used in a persistent cache

        This function is overridden by MQ to ensure we do not write the cache
        for a part of the history that will likely change.

        Efficient handling of filtered revisions in the branchcache should
        offer a better alternative. But we are using this approach until it
        is ready.
        """
        cl = self.changelog
        return cl.rev(cl.tip())

    @unfilteredmethod # Until we get a smarter cache management
    def updatebranchcache(self):
        cl = self.changelog
        tip = cl.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        if oldtip is None or oldtip not in cl.nodemap:
            partial, last, lrev = branchmap.read(self)
        else:
            lrev = cl.rev(oldtip)
            partial = self._branchcache

        catip = self._cacheabletip()
        # if lrev == catip: cache is already up to date
        # if lrev > catip: we have uncachable elements in `partial` that
        #                  can't be written on disk
        if lrev < catip:
            ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
-            self._updatebranchcache(partial, ctxgen)
+            branchmap.update(self, partial, ctxgen)
            branchmap.write(self, partial, cl.node(catip), catip)
            lrev = catip
        # If the cacheable tip was lower than the actual tip, we need to
        # update the cache up to tip. This update (from cacheable to actual
        # tip) is not written to disk since it's not cacheable.
        tiprev = len(self) - 1
        if lrev < tiprev:
            ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
-            self._updatebranchcache(partial, ctxgen)
+            branchmap.update(self, partial, ctxgen)
        self._branchcache = partial
        self._branchcachetip = tip

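    # Sketch of the discipline above: revisions up to _cacheabletip() are
    # persisted via branchmap.write(); anything newer is recomputed in
    # memory on every call (as happens with MQ patches applied):
    #
    #   repo.updatebranchcache()                 # read, extend, maybe write
    #   heads = repo._branchcache.get('default', [])
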
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
-            branchmap = {}
-            self._updatebranchcache(branchmap, (self[r] for r in self))
-            return branchmap
+            bmap = {}
+            branchmap.update(self, bmap, (self[r] for r in self))
+            return bmap
        else:
            self.updatebranchcache()
            return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

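    # Sketch: listing each branch with the revision of its tip-most open
    # head (closed heads are skipped by _branchtip when possible):
    #
    #   for name, node in sorted(repo.branchtags().iteritems()):
    #       repo.ui.write('%s %d\n' % (name, repo[node].rev()))
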
-    def _updatebranchcache(self, partial, ctxgen):
-        """Given a branchhead cache, partial, that may have extra nodes or be
-        missing heads, and a generator of nodes that are at least a superset of
-        heads missing, this function updates partial to be correct.
-        """
-        # collect new branch entries
-        newbranches = {}
-        for c in ctxgen:
-            newbranches.setdefault(c.branch(), []).append(c.node())
-        # if older branchheads are reachable from new ones, they aren't
-        # really branchheads. Note checking parents is insufficient:
-        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newnodes in newbranches.iteritems():
-            bheads = partial.setdefault(branch, [])
-            # Remove candidate heads that no longer are in the repo (e.g., as
-            # the result of a strip that just happened).  Avoid using 'node in
-            # self' here because that dives down into branchcache code somewhat
-            # recursively.
-            bheadrevs = [self.changelog.rev(node) for node in bheads
-                         if self.changelog.hasnode(node)]
-            newheadrevs = [self.changelog.rev(node) for node in newnodes
-                           if self.changelog.hasnode(node)]
-            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
-            # Remove duplicates - nodes that are in newheadrevs and are already
-            # in bheadrevs.  This can happen if you strip a node whose parent
-            # was already a head (because they're on different branches).
-            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
-
-            # Starting from tip means fewer passes over reachable.  If we know
-            # the new candidates are not ancestors of existing heads, we don't
-            # have to examine ancestors of existing heads
-            if ctxisnew:
-                iterrevs = sorted(newheadrevs)
-            else:
-                iterrevs = list(bheadrevs)
-
-            # This loop prunes out two kinds of heads - heads that are
-            # superseded by a head in newheadrevs, and newheadrevs that are not
-            # heads because an existing head is their descendant.
-            while iterrevs:
-                latest = iterrevs.pop()
-                if latest not in bheadrevs:
-                    continue
-                ancestors = set(self.changelog.ancestors([latest],
-                                                         bheadrevs[0]))
-                if ancestors:
-                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
-            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
-
-        # There may be branches that cease to exist when the last commit in the
-        # branch was stripped.  This code filters them out.  Note that the
-        # branch that ceased to exist may not be in newbranches because
-        # newbranches is the set of candidate heads, which when you strip the
-        # last commit in a branch will be the parent branch.
-        for branch in partial.keys():
-            nodes = [head for head in partial[branch]
-                     if self.changelog.hasnode(head)]
-            if not nodes:
-                del partial[branch]

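    # The pruning loop above, in miniature (plain revision numbers stand in
    # for nodes; the ancestor sets are made up for the sketch): a candidate
    # stops being a head once it is an ancestor of a newer one.
    #
    #   ancestors = {7: set([5, 2, 0])}   # pretend 7 descends from 5
    #   bheadrevs = [3, 5]
    #   bheadrevs = sorted(set(bheadrevs).union([7]))   # [3, 5, 7]
    #   bheadrevs = [b for b in bheadrevs if b not in ancestors[7]]
    #   # -> [3, 7]: 5 was superseded, 3 remains a head
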
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

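    # Sketch: known() answers the discovery question "which of these nodes
    # do you have and advertise?"; secret changesets read as unknown:
    #
    #   nodes = [repo['tip'].node(), 'x' * 20]
    #   repo.known(nodes)   # -> [True, False] unless tip is secret
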
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

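    # Sketch: the patterns consulted above come from hgrc sections of the
    # same name; e.g. a made-up [encode] rule piping text files through a
    # shell command via util.filter():
    #
    #   [encode]
    #   **.txt = tr a-z A-Z
    #
    # wread() would then return upper-cased data for matching files.
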
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

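    # Sketch of the usual calling convention, assuming the caller already
    # holds the store lock: close() commits the journal, release() aborts
    # the transaction if it was never closed:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... append to revlogs ...
    #       tr.close()
    #   finally:
    #       tr.release()
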
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

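    # Sketch: callers that only want to know whether a rollback is possible
    # can use the dry-run path above, which stops right after ui.status():
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       repo.rollback(dryrun=False)
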
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname,
                          int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

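    # Sketch of the lock ordering used throughout Mercurial -- wlock before
    # lock, released in reverse -- using the release() helper seen above:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       pass   # ... modify store and working directory ...
    #   finally:
    #       release(lock, wlock)
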
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

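    # Sketch: the copy metadata recorded above resurfaces through filectx;
    # for a file committed as a rename of 'foo' to 'bar':
    #
    #   fctx = repo['tip']['bar']
    #   fctx.renamed()   # -> ('foo', <filenode of the copy source>)
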
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))
1318 raise util.Abort(_("cannot commit merge with missing files"))
1379
1319
1380 ms = mergemod.mergestate(self)
1320 ms = mergemod.mergestate(self)
1381 for f in changes[0]:
1321 for f in changes[0]:
1382 if f in ms and ms[f] == 'u':
1322 if f in ms and ms[f] == 'u':
1383 raise util.Abort(_("unresolved merge conflicts "
1323 raise util.Abort(_("unresolved merge conflicts "
1384 "(see hg help resolve)"))
1324 "(see hg help resolve)"))
1385
1325
1386 cctx = context.workingctx(self, text, user, date, extra, changes)
1326 cctx = context.workingctx(self, text, user, date, extra, changes)
1387 if editor:
1327 if editor:
1388 cctx._text = editor(self, cctx, subs)
1328 cctx._text = editor(self, cctx, subs)
1389 edited = (text != cctx._text)
1329 edited = (text != cctx._text)
1390
1330
1391 # commit subs and write new state
1331 # commit subs and write new state
1392 if subs:
1332 if subs:
1393 for s in sorted(commitsubs):
1333 for s in sorted(commitsubs):
1394 sub = wctx.sub(s)
1334 sub = wctx.sub(s)
1395 self.ui.status(_('committing subrepository %s\n') %
1335 self.ui.status(_('committing subrepository %s\n') %
1396 subrepo.subrelpath(sub))
1336 subrepo.subrelpath(sub))
1397 sr = sub.commit(cctx._text, user, date)
1337 sr = sub.commit(cctx._text, user, date)
1398 newstate[s] = (newstate[s][0], sr)
1338 newstate[s] = (newstate[s][0], sr)
1399 subrepo.writestate(self, newstate)
1339 subrepo.writestate(self, newstate)
1400
1340
1401 # Save commit message in case this transaction gets rolled back
1341 # Save commit message in case this transaction gets rolled back
1402 # (e.g. by a pretxncommit hook). Leave the content alone on
1342 # (e.g. by a pretxncommit hook). Leave the content alone on
1403 # the assumption that the user will use the same editor again.
1343 # the assumption that the user will use the same editor again.
1404 msgfn = self.savecommitmessage(cctx._text)
1344 msgfn = self.savecommitmessage(cctx._text)
1405
1345
1406 p1, p2 = self.dirstate.parents()
1346 p1, p2 = self.dirstate.parents()
1407 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1347 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1408 try:
1348 try:
1409 self.hook("precommit", throw=True, parent1=hookp1,
1349 self.hook("precommit", throw=True, parent1=hookp1,
1410 parent2=hookp2)
1350 parent2=hookp2)
1411 ret = self.commitctx(cctx, True)
1351 ret = self.commitctx(cctx, True)
1412 except: # re-raises
1352 except: # re-raises
1413 if edited:
1353 if edited:
1414 self.ui.write(
1354 self.ui.write(
1415 _('note: commit message saved in %s\n') % msgfn)
1355 _('note: commit message saved in %s\n') % msgfn)
1416 raise
1356 raise
1417
1357
1418 # update bookmarks, dirstate and mergestate
1358 # update bookmarks, dirstate and mergestate
1419 bookmarks.update(self, [p1, p2], ret)
1359 bookmarks.update(self, [p1, p2], ret)
1420 for f in changes[0] + changes[1]:
1360 for f in changes[0] + changes[1]:
1421 self.dirstate.normal(f)
1361 self.dirstate.normal(f)
1422 for f in changes[2]:
1362 for f in changes[2]:
1423 self.dirstate.drop(f)
1363 self.dirstate.drop(f)
1424 self.dirstate.setparents(ret)
1364 self.dirstate.setparents(ret)
1425 ms.reset()
1365 ms.reset()
1426 finally:
1366 finally:
1427 wlock.release()
1367 wlock.release()
1428
1368
1429 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1369 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1430 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1370 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1431 self._afterlock(commithook)
1371 self._afterlock(commithook)
1432 return ret
1372 return ret
1433
1373
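    # Note (editor's illustration, not part of the original file): the hook
    # sequence driven by commit() above, for a hypothetical repo object:
    #
    #   repo.commit(text)           # takes the wlock
    #     -> 'precommit' hook       # may abort before anything is written
    #     -> commitctx()            # fires 'pretxncommit' inside the txn
    #     -> 'commit' hook          # queued via _afterlock; runs once the
    #                               # wlock has been released
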
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # If a parent has a higher phase, the resulting phase will
                # be compliant anyway.
                #
                # If the minimal phase was 0, we don't need to retract
                # anything.
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

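    # Note (editor's illustration, not part of the original file): every
    # filelog and manifest revision written above records
    # linkrev = len(self), i.e. the revision number the new changelog entry
    # receives once self.changelog.add() runs -- this is what ties file
    # revisions back to the changeset that introduced them.
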
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheads cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
-           self._updatebranchcache(self._branchcache, ctxgen)
+           branchmap.update(self, self._branchcache, ctxgen)
            branchmap.write(self, self._branchcache, self.changelog.tip(),
                            tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

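    # Example (editor's addition, hypothetical usage): the seven lists
    # returned by status(), in order:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(clean=True)
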
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

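    # Example (editor's addition, hypothetical usage): the newest head of a
    # named branch, skipping closed heads, per the ordering documented above:
    #
    #   tip_of_default = repo.branchheads('default')[0]
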
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

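    # Illustration (editor's addition, hypothetical standalone code, not
    # part of localrepo.py): the sampling pattern used by between() above,
    # on plain integers standing in for a run of first parents:
    #
    #   def sample(top, bottom):
    #       n, l, i, f = top, [], 0, 1
    #       while n != bottom:
    #           if i == f:
    #               l.append(n)
    #               f *= 2
    #           n -= 1      # stand-in for "follow the first parent"
    #           i += 1
    #       return l
    #
    #   sample(100, 0) == [99, 98, 96, 92, 84, 68, 36]
    #
    # i.e. nodes at distances 1, 2, 4, 8, ... from top -- a log-spaced
    # skeleton of the history between the two nodes.
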
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

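    # Illustration (editor's note), summarizing the phase handling above:
    #
    #   remote publishing (or no phase support) -> the whole pulled subset
    #       is advanced to public locally
    #   remote non-publishing -> only heads the remote reports as public
    #       advance to public; the rest of the subset becomes at most draft
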
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
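        # Example (editor's addition, hypothetical caller): interpreting the
        # return value documented above:
        #
        #   ret = repo.push(remote)
        #   if ret is None:
        #       pass                # nothing to push
        #   elif ret == 0:
        #       pass                # HTTP error
        #   else:
        #       pass                # pushed; see addchangegroup()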
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the whole iteration
                        if unfi.obsstore:
                            # these messages are defined here to stay within
                            # the 80-character limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            msd = _("push includes divergent changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is enough.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                                elif ctx.divergent():
                                    raise util.Abort(msd % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize all common changesets
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly.
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

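    # Example (editor's note, hypothetical nodes): with a linear history
    # 0..9, changegroupsubset(bases=[node(2)], heads=[node(5)], source)
    # bundles exactly the changesets that are descendants of node 2 and
    # ancestors of node 5, per the docstring above.
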
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

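    # Illustration (editor's note): in revset terms, getbundle() returns
    # roughly `::heads - ::common` -- everything reachable from heads that
    # is not already reachable from common.
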
2115 @unfilteredmethod
2055 @unfilteredmethod
2116 def _changegroupsubset(self, commonrevs, csets, heads, source):
2056 def _changegroupsubset(self, commonrevs, csets, heads, source):
2117
2057
2118 cl = self.changelog
2058 cl = self.changelog
2119 mf = self.manifest
2059 mf = self.manifest
2120 mfs = {} # needed manifests
2060 mfs = {} # needed manifests
2121 fnodes = {} # needed file nodes
2061 fnodes = {} # needed file nodes
2122 changedfiles = set()
2062 changedfiles = set()
2123 fstate = ['', {}]
2063 fstate = ['', {}]
2124 count = [0, 0]
2064 count = [0, 0]
2125
2065
2126 # can we go through the fast path ?
2066 # can we go through the fast path ?
2127 heads.sort()
2067 heads.sort()
2128 if heads == sorted(self.heads()):
2068 if heads == sorted(self.heads()):
2129 return self._changegroup(csets, source)
2069 return self._changegroup(csets, source)
2130
2070
2131 # slow path
2071 # slow path
2132 self.hook('preoutgoing', throw=True, source=source)
2072 self.hook('preoutgoing', throw=True, source=source)
2133 self.changegroupinfo(csets, source)
2073 self.changegroupinfo(csets, source)
2134
2074
2135 # filter any nodes that claim to be part of the known set
2075 # filter any nodes that claim to be part of the known set
2136 def prune(revlog, missing):
2076 def prune(revlog, missing):
2137 rr, rl = revlog.rev, revlog.linkrev
2077 rr, rl = revlog.rev, revlog.linkrev
2138 return [n for n in missing
2078 return [n for n in missing
2139 if rl(rr(n)) not in commonrevs]
2079 if rl(rr(n)) not in commonrevs]
2140
2080
2141 progress = self.ui.progress
2081 progress = self.ui.progress
2142 _bundling = _('bundling')
2082 _bundling = _('bundling')
2143 _changesets = _('changesets')
2083 _changesets = _('changesets')
2144 _manifests = _('manifests')
2084 _manifests = _('manifests')
2145 _files = _('files')
2085 _files = _('files')
2146
2086
2147 def lookup(revlog, x):
2087 def lookup(revlog, x):
2148 if revlog == cl:
2088 if revlog == cl:
2149 c = cl.read(x)
2089 c = cl.read(x)
2150 changedfiles.update(c[3])
2090 changedfiles.update(c[3])
2151 mfs.setdefault(c[0], x)
2091 mfs.setdefault(c[0], x)
2152 count[0] += 1
2092 count[0] += 1
2153 progress(_bundling, count[0],
2093 progress(_bundling, count[0],
2154 unit=_changesets, total=count[1])
2094 unit=_changesets, total=count[1])
2155 return x
2095 return x
2156 elif revlog == mf:
2096 elif revlog == mf:
2157 clnode = mfs[x]
2097 clnode = mfs[x]
2158 mdata = mf.readfast(x)
2098 mdata = mf.readfast(x)
2159 for f, n in mdata.iteritems():
2099 for f, n in mdata.iteritems():
2160 if f in changedfiles:
2100 if f in changedfiles:
2161 fnodes[f].setdefault(n, clnode)
2101 fnodes[f].setdefault(n, clnode)
2162 count[0] += 1
2102 count[0] += 1
2163 progress(_bundling, count[0],
2103 progress(_bundling, count[0],
2164 unit=_manifests, total=count[1])
2104 unit=_manifests, total=count[1])
2165 return clnode
2105 return clnode
2166 else:
2106 else:
2167 progress(_bundling, count[0], item=fstate[0],
2107 progress(_bundling, count[0], item=fstate[0],
2168 unit=_files, total=count[1])
2108 unit=_files, total=count[1])
2169 return fstate[1][x]
2109 return fstate[1][x]
2170
2110
2171 bundler = changegroup.bundle10(lookup)
2111 bundler = changegroup.bundle10(lookup)
2172 reorder = self.ui.config('bundle', 'reorder', 'auto')
2112 reorder = self.ui.config('bundle', 'reorder', 'auto')
2173 if reorder == 'auto':
2113 if reorder == 'auto':
2174 reorder = None
2114 reorder = None
2175 else:
2115 else:
2176 reorder = util.parsebool(reorder)
2116 reorder = util.parsebool(reorder)
2177
2117
        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

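        # gengroup() is lazy; util.chunkbuffer wraps the generator in a
        # file-like object so unbundle10 can expose a read() interface
        # without materializing the whole bundle in memory. 'UN' marks the
        # stream as uncompressed.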
        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers
        # will not see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

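            # Note that the changegroup and incoming hooks are deferred
            # through _afterlock, so they fire only once the repository
            # lock has been released.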
            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
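            # Per-file wire format consumed below, as implied by the parsing:
            # a '<name>\0<size>\n' header line, followed by exactly <size>
            # bytes of raw store data for that file.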
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    branchmap.write(self, self.branchcache,
                                    self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
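
        # Decision order below: honor a server that explicitly prefers
        # streaming ('stream-preferred'), then try a plain 'stream' clone
        # (remote store is revlogv1 only), then 'streamreqs' (a
        # comma-separated list of remote format requirements), and finally
        # fall back to a regular pull.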

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
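
    # Usage sketch (illustrative, with hypothetical values): namespaces such
    # as 'bookmarks' and 'phases' are registered by the pushkey module, so
    #
    #   repo.listkeys('bookmarks')
    #
    # returns a dict mapping bookmark names to hex nodes, and
    #
    #   repo.pushkey('bookmarks', 'feature-x', '', hexnode)
    #
    # tries to move or create the bookmark 'feature-x' and returns a value
    # that is true on success ('feature-x' and hexnode are made-up examples).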

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True