branchmap: make update a method
Pierre-Yves David
r18131:f0eeb9b3 default
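This changeset turns the module-level update(repo, partial, ctxgen) function of branchmap.py into a method on the branchcache class; the callers in branchmap.updatecache, discovery._headssummary, and localrepo.branchmap switch accordingly. A minimal sketch of the API change, assuming a repo object is already available (the setup around it is hypothetical, not part of this changeset):

    import branchmap

    # Old API (removed here):  branchmap.update(repo, partial, ctxgen)
    # New API: the cache object updates itself.
    cache = branchmap.read(repo)        # load .hg/cache/branchheads (may be empty)
    ctxgen = (repo[r] for r in repo)    # changectxs to fold into the cache
    cache.update(repo, ctxgen)          # the method introduced by this changeset
    cache.write(repo)                   # persist the refreshed heads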
mercurial/branchmap.py
@@ -1,173 +1,173 @@
1 1 # branchmap.py - logic to compute, maintain, and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10
11 11 def read(repo):
12 12 partial = branchcache()
13 13 try:
14 14 f = repo.opener("cache/branchheads")
15 15 lines = f.read().split('\n')
16 16 f.close()
17 17 except (IOError, OSError):
18 18 return branchcache()
19 19
20 20 try:
21 21 last, lrev = lines.pop(0).split(" ", 1)
22 22 last, lrev = bin(last), int(lrev)
23 23 if lrev >= len(repo) or repo[lrev].node() != last:
24 24 # invalidate the cache
25 25 raise ValueError('invalidating branch cache (tip differs)')
26 26 for l in lines:
27 27 if not l:
28 28 continue
29 29 node, label = l.split(" ", 1)
30 30 label = encoding.tolocal(label.strip())
31 31 if node not in repo:
32 32 raise ValueError('invalidating branch cache because node '+
33 33 '%s does not exist' % node)
34 34 partial.setdefault(label, []).append(bin(node))
35 35 partial.tipnode = last
36 36 partial.tiprev = lrev
37 37 except KeyboardInterrupt:
38 38 raise
39 39 except Exception, inst:
40 40 if repo.ui.debugflag:
41 41 repo.ui.warn(str(inst), '\n')
42 42 partial = branchcache()
43 43 return partial
44 44
45 def update(repo, partial, ctxgen):
46 """Given a branchhead cache, partial, that may have extra nodes or be
47 missing heads, and a generator of changectxs covering at least all the
48 missing heads, this function updates partial to be correct.
49 """
50 cl = repo.changelog
51 # collect new branch entries
52 newbranches = {}
53 for c in ctxgen:
54 newbranches.setdefault(c.branch(), []).append(c.node())
55 # if older branchheads are reachable from new ones, they aren't
56 # really branchheads. Note checking parents is insufficient:
57 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
58 for branch, newnodes in newbranches.iteritems():
59 bheads = partial.setdefault(branch, [])
60 # Remove candidate heads that are no longer in the repo (e.g., as
61 # the result of a strip that just happened). Avoid using 'node in
62 # self' here because that dives down into branchcache code somewhat
63 # recursively.
64 bheadrevs = [cl.rev(node) for node in bheads
65 if cl.hasnode(node)]
66 newheadrevs = [cl.rev(node) for node in newnodes
67 if cl.hasnode(node)]
68 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
69 # Remove duplicates - nodes that are in newheadrevs and are already
70 # in bheadrevs. This can happen if you strip a node whose parent
71 # was already a head (because they're on different branches).
72 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
73
74 # Starting from tip means fewer passes over reachable. If we know
75 # the new candidates are not ancestors of existing heads, we don't
76 # have to examine ancestors of existing heads
77 if ctxisnew:
78 iterrevs = sorted(newheadrevs)
79 else:
80 iterrevs = list(bheadrevs)
81
82 # This loop prunes out two kinds of heads - heads that are
83 # superseded by a head in newheadrevs, and newheadrevs that are not
84 # heads because an existing head is their descendant.
85 while iterrevs:
86 latest = iterrevs.pop()
87 if latest not in bheadrevs:
88 continue
89 ancestors = set(cl.ancestors([latest],
90 bheadrevs[0]))
91 if ancestors:
92 bheadrevs = [b for b in bheadrevs if b not in ancestors]
93 partial[branch] = [cl.node(rev) for rev in bheadrevs]
94 tiprev = max(bheadrevs)
95 if tiprev > partial.tiprev:
96 partial.tipnode = cl.node(tiprev)
97 partial.tiprev = tiprev
98
99
100 # There may be branches that cease to exist when their last commit
101 # is stripped. This code filters them out. Note that a branch that
102 # ceased to exist may not be in newbranches, because newbranches is
103 # the set of candidate heads, which after stripping the last commit
104 # in a branch will belong to the parent branch.
105 droppednodes = []
106 for branch in partial.keys():
107 nodes = [head for head in partial[branch]
108 if cl.hasnode(head)]
109 if not nodes:
110 droppednodes.extend(partial[branch])
111 del partial[branch]
112 try:
113 node = cl.node(partial.tiprev)
114 except IndexError:
115 node = None
116 if ((partial.tipnode != node)
117 or (partial.tipnode in droppednodes)):
118 # cache keys are not valid anymore
119 partial.tipnode = nullid
120 partial.tiprev = nullrev
121 for heads in partial.values():
122 tiprev = max(cl.rev(node) for node in heads)
123 if tiprev > partial.tiprev:
124 partial.tipnode = cl.node(tiprev)
125 partial.tiprev = tiprev
126 45
127 46
128 47 def updatecache(repo):
129 48 repo = repo.unfiltered() # Until we get a smarter cache management
130 49 cl = repo.changelog
131 50 tip = cl.tip()
132 51 partial = repo._branchcache
133 52 if partial is not None and partial.tipnode == tip:
134 53 return
135 54
136 55 if partial is None or partial.tipnode not in cl.nodemap:
137 56 partial = read(repo)
138 57
139 58 catip = repo._cacheabletip()
140 59 # if partial.tiprev == catip: cache is already up to date
141 60 # if partial.tiprev > catip: we have uncachable elements in `partial`,
142 61 # can't write to disk
143 62 if partial.tiprev < catip:
144 63 ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
145 update(repo, partial, ctxgen)
64 partial.update(repo, ctxgen)
146 65 partial.write(repo)
147 66 # If the cacheable tip is lower than the actual tip, we need to update the
148 67 # cache up to tip. This update (from cacheable to actual tip) is not
149 68 # written to disk since it's not cacheable.
150 69 tiprev = len(repo) - 1
151 70 if partial.tiprev < tiprev:
152 71 ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
153 update(repo, partial, ctxgen)
72 partial.update(repo, ctxgen)
154 73 repo._branchcache = partial
155 74
156 75 class branchcache(dict):
157 76 """A dict like object that hold branches heads cache"""
158 77
159 78 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev):
160 79 super(branchcache, self).__init__(entries)
161 80 self.tipnode = tipnode
162 81 self.tiprev = tiprev
163 82
164 83 def write(self, repo):
165 84 try:
166 85 f = repo.opener("cache/branchheads", "w", atomictemp=True)
167 86 f.write("%s %s\n" % (hex(self.tipnode), self.tiprev))
168 87 for label, nodes in self.iteritems():
169 88 for node in nodes:
170 89 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
171 90 f.close()
172 91 except (IOError, OSError):
173 92 pass
93
94 def update(self, repo, ctxgen):
95 """Given a branchhead cache, self, that may have extra nodes or be
96 missing heads, and a generator of changectxs covering at least all the
97 missing heads, this function updates self to be correct.
98 """
99 cl = repo.changelog
100 # collect new branch entries
101 newbranches = {}
102 for c in ctxgen:
103 newbranches.setdefault(c.branch(), []).append(c.node())
104 # if older branchheads are reachable from new ones, they aren't
105 # really branchheads. Note checking parents is insufficient:
106 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
107 for branch, newnodes in newbranches.iteritems():
108 bheads = self.setdefault(branch, [])
109 # Remove candidate heads that are no longer in the repo (e.g., as
110 # the result of a strip that just happened). Avoid using 'node in
111 # self' here because that dives down into branchcache code somewhat
112 # recursively.
113 bheadrevs = [cl.rev(node) for node in bheads
114 if cl.hasnode(node)]
115 newheadrevs = [cl.rev(node) for node in newnodes
116 if cl.hasnode(node)]
117 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
118 # Remove duplicates - nodes that are in newheadrevs and are already
119 # in bheadrevs. This can happen if you strip a node whose parent
120 # was already a head (because they're on different branches).
121 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
122
123 # Starting from tip means fewer passes over reachable. If we know
124 # the new candidates are not ancestors of existing heads, we don't
125 # have to examine ancestors of existing heads
126 if ctxisnew:
127 iterrevs = sorted(newheadrevs)
128 else:
129 iterrevs = list(bheadrevs)
130
131 # This loop prunes out two kinds of heads - heads that are
132 # superseded by a head in newheadrevs, and newheadrevs that are not
133 # heads because an existing head is their descendant.
134 while iterrevs:
135 latest = iterrevs.pop()
136 if latest not in bheadrevs:
137 continue
138 ancestors = set(cl.ancestors([latest],
139 bheadrevs[0]))
140 if ancestors:
141 bheadrevs = [b for b in bheadrevs if b not in ancestors]
142 self[branch] = [cl.node(rev) for rev in bheadrevs]
143 tiprev = max(bheadrevs)
144 if tiprev > self.tiprev:
145 self.tipnode = cl.node(tiprev)
146 self.tiprev = tiprev
147
148 # There may be branches that cease to exist when their last commit
149 # is stripped. This code filters them out. Note that a branch that
150 # ceased to exist may not be in newbranches, because newbranches is
151 # the set of candidate heads, which after stripping the last commit
152 # in a branch will belong to the parent branch.
153 droppednodes = []
154 for branch in self.keys():
155 nodes = [head for head in self[branch]
156 if cl.hasnode(head)]
157 if not nodes:
158 droppednodes.extend(self[branch])
159 del self[branch]
160 try:
161 node = cl.node(self.tiprev)
162 except IndexError:
163 node = None
164 if ((self.tipnode != node)
165 or (self.tipnode in droppednodes)):
166 # cache keys are not valid anymore
167 self.tipnode = nullid
168 self.tiprev = nullrev
169 for heads in self.values():
170 tiprev = max(cl.rev(node) for node in heads)
171 if tiprev > self.tiprev:
172 self.tipnode = cl.node(tiprev)
173 self.tiprev = tiprev
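For reference, read() and write() above imply a simple line-oriented format for .hg/cache/branchheads: the first line is "<tipnode hex> <tiprev>", and each following line is "<node hex> <branch label>". A standalone parsing sketch of that layout (not Mercurial API; it assumes a well-formed file):

    from binascii import unhexlify

    def parse_branchheads(text):
        # First line: "<tipnode hex> <tiprev>"; remaining lines:
        # "<node hex> <branch label>", one per branch head.
        lines = text.split('\n')
        tiphex, tiprev = lines.pop(0).split(' ', 1)
        heads = {}
        for line in lines:
            if not line:
                continue
            nodehex, label = line.split(' ', 1)
            heads.setdefault(label.strip(), []).append(unhexlify(nodehex))
        return unhexlify(tiphex), int(tiprev), heads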
mercurial/discovery.py
@@ -1,348 +1,348 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 11 import branchmap
12 12
13 13 def findcommonincoming(repo, remote, heads=None, force=False):
14 14 """Return a tuple (common, anyincoming, heads) used to identify the common
15 15 subset of nodes between repo and remote.
16 16
17 17 "common" is a list of (at least) the heads of the common subset.
18 18 "anyincoming" is testable as a boolean indicating if any nodes are missing
19 19 locally. If remote does not support getbundle, this actually is a list of
20 20 roots of the nodes that would be incoming, to be supplied to
21 21 changegroupsubset. No code except for pull should be relying on this fact
22 22 any longer.
23 23 "heads" is either the supplied heads, or else the remote's heads.
24 24
25 25 If you pass heads and they are all known locally, the response lists just
26 26 these heads in "common" and in "heads".
27 27
28 28 Please use findcommonoutgoing to compute the set of outgoing nodes to give
29 29 extensions a good hook into outgoing.
30 30 """
31 31
32 32 if not remote.capable('getbundle'):
33 33 return treediscovery.findcommonincoming(repo, remote, heads, force)
34 34
35 35 if heads:
36 36 allknown = True
37 37 nm = repo.changelog.nodemap
38 38 for h in heads:
39 39 if nm.get(h) is None:
40 40 allknown = False
41 41 break
42 42 if allknown:
43 43 return (heads, False, heads)
44 44
45 45 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
46 46 abortwhenunrelated=not force)
47 47 common, anyinc, srvheads = res
48 48 return (list(common), anyinc, heads or list(srvheads))
49 49
50 50 class outgoing(object):
51 51 '''Represents the set of nodes present in a local repo but not in a
52 52 (possibly) remote one.
53 53
54 54 Members:
55 55
56 56 missing is a list of all nodes present in local but not in remote.
57 57 common is a list of all nodes shared between the two repos.
58 58 excluded is the list of missing changesets that shouldn't be sent remotely.
59 59 missingheads is the list of heads of missing.
60 60 commonheads is the list of heads of common.
61 61
62 62 The sets are computed on demand from the heads, unless provided upfront
63 63 by discovery.'''
64 64
65 65 def __init__(self, revlog, commonheads, missingheads):
66 66 self.commonheads = commonheads
67 67 self.missingheads = missingheads
68 68 self._revlog = revlog
69 69 self._common = None
70 70 self._missing = None
71 71 self.excluded = []
72 72
73 73 def _computecommonmissing(self):
74 74 sets = self._revlog.findcommonmissing(self.commonheads,
75 75 self.missingheads)
76 76 self._common, self._missing = sets
77 77
78 78 @util.propertycache
79 79 def common(self):
80 80 if self._common is None:
81 81 self._computecommonmissing()
82 82 return self._common
83 83
84 84 @util.propertycache
85 85 def missing(self):
86 86 if self._missing is None:
87 87 self._computecommonmissing()
88 88 return self._missing
89 89
90 90 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
91 91 commoninc=None, portable=False):
92 92 '''Return an outgoing instance to identify the nodes present in repo but
93 93 not in other.
94 94
95 95 If onlyheads is given, only nodes ancestral to nodes in onlyheads
96 96 (inclusive) are included. If you already know the local repo's heads,
97 97 passing them in onlyheads is faster than letting them be recomputed here.
98 98
99 99 If commoninc is given, it must be the result of a prior call to
100 100 findcommonincoming(repo, other, force) to avoid recomputing it here.
101 101
102 102 If portable is given, compute more conservative common and missingheads,
103 103 to make bundles created from the instance more portable.'''
104 104 # declare an empty outgoing object to be filled later
105 105 og = outgoing(repo.changelog, None, None)
106 106
107 107 # get common set if not provided
108 108 if commoninc is None:
109 109 commoninc = findcommonincoming(repo, other, force=force)
110 110 og.commonheads, _any, _hds = commoninc
111 111
112 112 # compute outgoing
113 113 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
114 114 if not mayexclude:
115 115 og.missingheads = onlyheads or repo.heads()
116 116 elif onlyheads is None:
117 117 # use visible heads as it should be cached
118 118 og.missingheads = visibleheads(repo)
119 119 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
120 120 else:
121 121 # compute common, missing and exclude secret stuff
122 122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 123 og._common, allmissing = sets
124 124 og._missing = missing = []
125 125 og.excluded = excluded = []
126 126 for node in allmissing:
127 127 ctx = repo[node]
128 128 if ctx.phase() >= phases.secret or ctx.extinct():
129 129 excluded.append(node)
130 130 else:
131 131 missing.append(node)
132 132 if len(missing) == len(allmissing):
133 133 missingheads = onlyheads
134 134 else: # update missing heads
135 135 missingheads = phases.newheads(repo, onlyheads, excluded)
136 136 og.missingheads = missingheads
137 137 if portable:
138 138 # recompute common and missingheads as if -r<rev> had been given for
139 139 # each head of missing, and --base <rev> for each head of the proper
140 140 # ancestors of missing
141 141 og._computecommonmissing()
142 142 cl = repo.changelog
143 143 missingrevs = set(cl.rev(n) for n in og._missing)
144 144 og._common = set(cl.ancestors(missingrevs)) - missingrevs
145 145 commonheads = set(og.commonheads)
146 146 og.missingheads = [h for h in og.missingheads if h not in commonheads]
147 147
148 148 return og
149 149
150 150 def _headssummary(repo, remote, outgoing):
151 151 """compute a summary of branch and heads status before and after push
152 152
153 153 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
154 154
155 155 - branch: the branch name
156 156 - remoteheads: the list of remote heads known locally
157 157 None if the branch is new
158 158 - newheads: the new remote heads (known locally) with outgoing pushed
159 159 - unsyncedheads: the list of remote heads unknown locally.
160 160 """
161 161 cl = repo.changelog
162 162 headssum = {}
163 163 # A. Create set of branches involved in the push.
164 164 branches = set(repo[n].branch() for n in outgoing.missing)
165 165 remotemap = remote.branchmap()
166 166 newbranches = branches - set(remotemap)
167 167 branches.difference_update(newbranches)
168 168
169 169 # B. register remote heads
170 170 remotebranches = set()
171 171 for branch, heads in remote.branchmap().iteritems():
172 172 remotebranches.add(branch)
173 173 known = []
174 174 unsynced = []
175 175 for h in heads:
176 176 if h in cl.nodemap:
177 177 known.append(h)
178 178 else:
179 179 unsynced.append(h)
180 180 headssum[branch] = (known, list(known), unsynced)
181 181 # C. add new branch data
182 182 missingctx = list(repo[n] for n in outgoing.missing)
183 183 touchedbranches = set()
184 184 for ctx in missingctx:
185 185 branch = ctx.branch()
186 186 touchedbranches.add(branch)
187 187 if branch not in headssum:
188 188 headssum[branch] = (None, [], [])
189 189
190 190 # D. drop data about untouched branches:
191 191 for branch in remotebranches - touchedbranches:
192 192 del headssum[branch]
193 193
194 194 # E. Update newmap with outgoing changes.
195 195 # This will possibly add new heads and remove existing ones.
196 196 newmap = branchmap.branchcache((branch, heads[1])
197 197 for branch, heads in headssum.iteritems()
198 198 if heads[0] is not None)
199 branchmap.update(repo, newmap, missingctx)
199 newmap.update(repo, missingctx)
200 200 for branch, newheads in newmap.iteritems():
201 201 headssum[branch][1][:] = newheads
202 202 return headssum
203 203
204 204 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
205 205 """Compute branchmapsummary for repo without branchmap support"""
206 206
207 207 cl = repo.changelog
208 208 # 1-4b. old servers: Check for new topological heads.
209 209 # Construct {old,new}map with branch = None (topological branch).
210 210 # (code based on update)
211 211 oldheads = set(h for h in remoteheads if h in cl.nodemap)
212 212 # all nodes in outgoing.missing are children of either:
213 213 # - an element of oldheads
214 214 # - another element of outgoing.missing
215 215 # - nullrev
216 216 # This explains why the new heads are very simple to compute.
217 217 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
218 218 newheads = list(c.node() for c in r)
219 219 unsynced = inc and set([None]) or set()
220 220 return {None: (oldheads, newheads, unsynced)}
221 221
222 222 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
223 223 """Check that a push won't add any outgoing head
224 224
225 225 raise Abort error and display ui message as needed.
226 226 """
227 227 # Check for each named branch if we're creating new remote heads.
228 228 # To be a remote head after push, node must be either:
229 229 # - unknown locally
230 230 # - a local outgoing head descended from update
231 231 # - a remote head that's known locally and not
232 232 # ancestral to an outgoing head
233 233 if remoteheads == [nullid]:
234 234 # remote is empty, nothing to check.
235 235 return
236 236
237 237 if remote.capable('branchmap'):
238 238 headssum = _headssummary(repo, remote, outgoing)
239 239 else:
240 240 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
241 241 newbranches = [branch for branch, heads in headssum.iteritems()
242 242 if heads[0] is None]
243 243 # 1. Check for new branches on the remote.
244 244 if newbranches and not newbranch: # new branch requires --new-branch
245 245 branchnames = ', '.join(sorted(newbranches))
246 246 raise util.Abort(_("push creates new remote branches: %s!")
247 247 % branchnames,
248 248 hint=_("use 'hg push --new-branch' to create"
249 249 " new remote branches"))
250 250
251 251 # 2. Compute newly pushed bookmarks. We
252 252 # don't warn about bookmarked heads.
253 253 localbookmarks = repo._bookmarks
254 254 remotebookmarks = remote.listkeys('bookmarks')
255 255 bookmarkedheads = set()
256 256 for bm in localbookmarks:
257 257 rnode = remotebookmarks.get(bm)
258 258 if rnode and rnode in repo:
259 259 lctx, rctx = repo[bm], repo[rnode]
260 260 if bookmarks.validdest(repo, rctx, lctx):
261 261 bookmarkedheads.add(lctx.node())
262 262
263 263 # 3. Check for new heads.
264 264 # If there are more heads after the push than before, a suitable
265 265 # error message, depending on unsynced status, is displayed.
266 266 error = None
267 267 unsynced = False
268 268 allmissing = set(outgoing.missing)
269 269 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
270 270 allfuturecommon.update(allmissing)
271 271 for branch, heads in headssum.iteritems():
272 272 if heads[0] is None:
273 273 # Maybe we should abort if we push more than one head
274 274 # for new branches?
275 275 continue
276 276 candidate_newhs = set(heads[1])
277 277 # add unsynced data
278 278 oldhs = set(heads[0])
279 279 oldhs.update(heads[2])
280 280 candidate_newhs.update(heads[2])
281 281 dhs = None
282 282 discardedheads = set()
283 283 if repo.obsstore:
284 284 # remove future heads which are actually obsolete by another
285 285 # pushed element:
286 286 #
287 287 # XXX as above, there are several cases this code does not handle
288 288 # XXX properly
289 289 #
290 290 # (1) if <nh> is public, it won't be affected by obsolete markers
291 291 # and a new head is created
292 292 #
293 293 # (2) if the new heads have ancestors which are not obsolete and
294 294 # not ancestors of any other heads, we will have a new head too.
295 295 #
296 296 # These two cases will be easy to handle for known changesets but much
297 297 # more tricky for unsynced changes.
298 298 newhs = set()
299 299 for nh in candidate_newhs:
300 300 if nh in repo and repo[nh].phase() <= phases.public:
301 301 newhs.add(nh)
302 302 else:
303 303 for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
304 304 if suc != nh and suc in allfuturecommon:
305 305 discardedheads.add(nh)
306 306 break
307 307 else:
308 308 newhs.add(nh)
309 309 else:
310 310 newhs = candidate_newhs
311 311 if [h for h in heads[2] if h not in discardedheads]:
312 312 unsynced = True
313 313 if len(newhs) > len(oldhs):
314 314 # strip updates to existing remote heads from the new heads list
315 315 dhs = list(newhs - bookmarkedheads - oldhs)
316 316 if dhs:
317 317 if error is None:
318 318 if branch not in ('default', None):
319 319 error = _("push creates new remote head %s "
320 320 "on branch '%s'!") % (short(dhs[0]), branch)
321 321 else:
322 322 error = _("push creates new remote head %s!"
323 323 ) % short(dhs[0])
324 324 if heads[2]: # unsynced
325 325 hint = _("you should pull and merge or "
326 326 "use push -f to force")
327 327 else:
328 328 hint = _("did you forget to merge? "
329 329 "use push -f to force")
330 330 if branch is not None:
331 331 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
332 332 for h in dhs:
333 333 repo.ui.note(_("new remote head %s\n") % short(h))
334 334 if error:
335 335 raise util.Abort(error, hint=hint)
336 336
337 337 # 6. Check for unsynced changes on involved branches.
338 338 if unsynced:
339 339 repo.ui.warn(_("note: unsynced remote changes!\n"))
340 340
341 341 def visibleheads(repo):
342 342 """return the set of visible head of this repo"""
343 343 return repo.filtered('unserved').heads()
344 344
345 345
346 346 def visiblebranchmap(repo):
347 347 """return a branchmap for the visible set"""
348 348 return repo.filtered('unserved').branchmap()
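As a reading aid, the headssum mapping built by _headssummary and consumed by checkheads has the following shape; the node values below are placeholders, not real hashes:

    headssum = {
        # branch name: (remoteheads, newheads, unsyncedheads)
        'default': (['aaa111'],            # remote heads known locally
                    ['aaa111', 'bbb222'],  # remote heads once outgoing is pushed
                    []),                   # remote heads unknown locally
        'stable': (None, [], []),          # remoteheads is None: branch is new
    }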
mercurial/localrepo.py
@@ -1,2585 +1,2585 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150
151 151 def _baserequirements(self, create):
152 152 return self.requirements[:]
153 153
154 154 def __init__(self, baseui, path=None, create=False):
155 155 self.wvfs = scmutil.vfs(path, expand=True)
156 156 self.wopener = self.wvfs
157 157 self.root = self.wvfs.base
158 158 self.path = self.wvfs.join(".hg")
159 159 self.origroot = path
160 160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 161 self.vfs = scmutil.vfs(self.path)
162 162 self.opener = self.vfs
163 163 self.baseui = baseui
164 164 self.ui = baseui.copy()
165 165 # A list of callbacks to shape the phases if no data were found.
166 166 # Callbacks are in the form: func(repo, roots) --> processed root.
167 167 # This list is to be filled by extensions during repo setup
168 168 self._phasedefaults = []
169 169 try:
170 170 self.ui.readconfig(self.join("hgrc"), self.root)
171 171 extensions.loadall(self.ui)
172 172 except IOError:
173 173 pass
174 174
175 175 if not self.vfs.isdir():
176 176 if create:
177 177 if not self.wvfs.exists():
178 178 self.wvfs.makedirs()
179 179 self.vfs.makedir(notindexed=True)
180 180 requirements = self._baserequirements(create)
181 181 if self.ui.configbool('format', 'usestore', True):
182 182 self.vfs.mkdir("store")
183 183 requirements.append("store")
184 184 if self.ui.configbool('format', 'usefncache', True):
185 185 requirements.append("fncache")
186 186 if self.ui.configbool('format', 'dotencode', True):
187 187 requirements.append('dotencode')
188 188 # create an invalid changelog
189 189 self.vfs.append(
190 190 "00changelog.i",
191 191 '\0\0\0\2' # represents revlogv2
192 192 ' dummy changelog to prevent using the old repo layout'
193 193 )
194 194 if self.ui.configbool('format', 'generaldelta', False):
195 195 requirements.append("generaldelta")
196 196 requirements = set(requirements)
197 197 else:
198 198 raise error.RepoError(_("repository %s not found") % path)
199 199 elif create:
200 200 raise error.RepoError(_("repository %s already exists") % path)
201 201 else:
202 202 try:
203 203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 204 except IOError, inst:
205 205 if inst.errno != errno.ENOENT:
206 206 raise
207 207 requirements = set()
208 208
209 209 self.sharedpath = self.path
210 210 try:
211 211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 212 if not os.path.exists(s):
213 213 raise error.RepoError(
214 214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 215 self.sharedpath = s
216 216 except IOError, inst:
217 217 if inst.errno != errno.ENOENT:
218 218 raise
219 219
220 220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 221 self.spath = self.store.path
222 222 self.svfs = self.store.vfs
223 223 self.sopener = self.svfs
224 224 self.sjoin = self.store.join
225 225 self.vfs.createmode = self.store.createmode
226 226 self._applyrequirements(requirements)
227 227 if create:
228 228 self._writerequirements()
229 229
230 230
231 231 self._branchcache = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 # hold sets of revision to be filtered
243 243 # should be cleared when something might have changed the filter value:
244 244 # - new changesets,
245 245 # - phase change,
246 246 # - new obsolescence marker,
247 247 # - working directory parent change,
248 248 # - bookmark changes
249 249 self.filteredrevcache = {}
250 250
251 251 def close(self):
252 252 pass
253 253
254 254 def _restrictcapabilities(self, caps):
255 255 return caps
256 256
257 257 def _applyrequirements(self, requirements):
258 258 self.requirements = requirements
259 259 self.sopener.options = dict((r, 1) for r in requirements
260 260 if r in self.openerreqs)
261 261
262 262 def _writerequirements(self):
263 263 reqfile = self.opener("requires", "w")
264 264 for r in self.requirements:
265 265 reqfile.write("%s\n" % r)
266 266 reqfile.close()
267 267
268 268 def _checknested(self, path):
269 269 """Determine if path is a legal nested repository."""
270 270 if not path.startswith(self.root):
271 271 return False
272 272 subpath = path[len(self.root) + 1:]
273 273 normsubpath = util.pconvert(subpath)
274 274
275 275 # XXX: Checking against the current working copy is wrong in
276 276 # the sense that it can reject things like
277 277 #
278 278 # $ hg cat -r 10 sub/x.txt
279 279 #
280 280 # if sub/ is no longer a subrepository in the working copy
281 281 # parent revision.
282 282 #
283 283 # However, it can of course also allow things that would have
284 284 # been rejected before, such as the above cat command if sub/
285 285 # is a subrepository now, but was a normal directory before.
286 286 # The old path auditor would have rejected by mistake since it
287 287 # panics when it sees sub/.hg/.
288 288 #
289 289 # All in all, checking against the working copy seems sensible
290 290 # since we want to prevent access to nested repositories on
291 291 # the filesystem *now*.
292 292 ctx = self[None]
293 293 parts = util.splitpath(subpath)
294 294 while parts:
295 295 prefix = '/'.join(parts)
296 296 if prefix in ctx.substate:
297 297 if prefix == normsubpath:
298 298 return True
299 299 else:
300 300 sub = ctx.sub(prefix)
301 301 return sub.checknested(subpath[len(prefix) + 1:])
302 302 else:
303 303 parts.pop()
304 304 return False
305 305
306 306 def peer(self):
307 307 return localpeer(self) # not cached to avoid reference cycle
308 308
309 309 def unfiltered(self):
310 310 """Return unfiltered version of the repository
311 311
312 312 Intended to be overwritten by filtered repo.
313 313 return self
314 314
315 315 def filtered(self, name):
316 316 """Return a filtered version of a repository"""
317 317 # build a new class with the mixin and the current class
318 318 # (possibly a subclass of the repo)
319 319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 320 pass
321 321 return proxycls(self, name)
322 322
323 323 @repofilecache('bookmarks')
324 324 def _bookmarks(self):
325 325 return bookmarks.bmstore(self)
326 326
327 327 @repofilecache('bookmarks.current')
328 328 def _bookmarkcurrent(self):
329 329 return bookmarks.readcurrent(self)
330 330
331 331 def bookmarkheads(self, bookmark):
332 332 name = bookmark.split('@', 1)[0]
333 333 heads = []
334 334 for mark, n in self._bookmarks.iteritems():
335 335 if mark.split('@', 1)[0] == name:
336 336 heads.append(n)
337 337 return heads
338 338
339 339 @storecache('phaseroots')
340 340 def _phasecache(self):
341 341 return phases.phasecache(self, self._phasedefaults)
342 342
343 343 @storecache('obsstore')
344 344 def obsstore(self):
345 345 store = obsolete.obsstore(self.sopener)
346 346 if store and not obsolete._enabled:
347 347 # message is rare enough to not be translated
348 348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 349 self.ui.warn(msg % len(list(store)))
350 350 return store
351 351
352 352 @unfilteredpropertycache
353 353 def hiddenrevs(self):
354 354 """hiddenrevs: revs that should be hidden by command and tools
355 355
356 356 This set is carried on the repo to ease initialization and lazy
357 357 loading; it'll probably move back to changelog for efficiency and
358 358 consistency reasons.
359 359
360 360 Note that the hiddenrevs will need invalidation when
361 361 - a new changeset is added (possibly unstable above extinct)
362 362 - a new obsolete marker is added (possibly a new extinct changeset)
363 363
364 364 hidden changesets cannot have non-hidden descendants
365 365 """
366 366 hidden = set()
367 367 if self.obsstore:
368 368 ### hide extinct changesets that are not accessible by any means
369 369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 370 hidden.update(self.revs(hiddenquery))
371 371 return hidden
372 372
373 373 @storecache('00changelog.i')
374 374 def changelog(self):
375 375 c = changelog.changelog(self.sopener)
376 376 if 'HG_PENDING' in os.environ:
377 377 p = os.environ['HG_PENDING']
378 378 if p.startswith(self.root):
379 379 c.readpending('00changelog.i.a')
380 380 return c
381 381
382 382 @storecache('00manifest.i')
383 383 def manifest(self):
384 384 return manifest.manifest(self.sopener)
385 385
386 386 @repofilecache('dirstate')
387 387 def dirstate(self):
388 388 warned = [0]
389 389 def validate(node):
390 390 try:
391 391 self.changelog.rev(node)
392 392 return node
393 393 except error.LookupError:
394 394 if not warned[0]:
395 395 warned[0] = True
396 396 self.ui.warn(_("warning: ignoring unknown"
397 397 " working parent %s!\n") % short(node))
398 398 return nullid
399 399
400 400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 401
402 402 def __getitem__(self, changeid):
403 403 if changeid is None:
404 404 return context.workingctx(self)
405 405 return context.changectx(self, changeid)
406 406
407 407 def __contains__(self, changeid):
408 408 try:
409 409 return bool(self.lookup(changeid))
410 410 except error.RepoLookupError:
411 411 return False
412 412
413 413 def __nonzero__(self):
414 414 return True
415 415
416 416 def __len__(self):
417 417 return len(self.changelog)
418 418
419 419 def __iter__(self):
420 420 return iter(self.changelog)
421 421
422 422 def revs(self, expr, *args):
423 423 '''Return a list of revisions matching the given revset'''
424 424 expr = revset.formatspec(expr, *args)
425 425 m = revset.match(None, expr)
426 426 return [r for r in m(self, list(self))]
427 427
428 428 def set(self, expr, *args):
429 429 '''
430 430 Yield a context for each matching revision, after doing arg
431 431 replacement via revset.formatspec
432 432 '''
433 433 for r in self.revs(expr, *args):
434 434 yield self[r]
435 435
436 436 def url(self):
437 437 return 'file:' + self.root
438 438
439 439 def hook(self, name, throw=False, **args):
440 440 return hook.hook(self.ui, self, name, throw, **args)
441 441
442 442 @unfilteredmethod
443 443 def _tag(self, names, node, message, local, user, date, extra={}):
444 444 if isinstance(names, str):
445 445 names = (names,)
446 446
447 447 branches = self.branchmap()
448 448 for name in names:
449 449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 450 local=local)
451 451 if name in branches:
452 452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 453 " branch name\n") % name)
454 454
455 455 def writetags(fp, names, munge, prevtags):
456 456 fp.seek(0, 2)
457 457 if prevtags and prevtags[-1] != '\n':
458 458 fp.write('\n')
459 459 for name in names:
460 460 m = munge and munge(name) or name
461 461 if (self._tagscache.tagtypes and
462 462 name in self._tagscache.tagtypes):
463 463 old = self.tags().get(name, nullid)
464 464 fp.write('%s %s\n' % (hex(old), m))
465 465 fp.write('%s %s\n' % (hex(node), m))
466 466 fp.close()
467 467
468 468 prevtags = ''
469 469 if local:
470 470 try:
471 471 fp = self.opener('localtags', 'r+')
472 472 except IOError:
473 473 fp = self.opener('localtags', 'a')
474 474 else:
475 475 prevtags = fp.read()
476 476
477 477 # local tags are stored in the current charset
478 478 writetags(fp, names, None, prevtags)
479 479 for name in names:
480 480 self.hook('tag', node=hex(node), tag=name, local=local)
481 481 return
482 482
483 483 try:
484 484 fp = self.wfile('.hgtags', 'rb+')
485 485 except IOError, e:
486 486 if e.errno != errno.ENOENT:
487 487 raise
488 488 fp = self.wfile('.hgtags', 'ab')
489 489 else:
490 490 prevtags = fp.read()
491 491
492 492 # committed tags are stored in UTF-8
493 493 writetags(fp, names, encoding.fromlocal, prevtags)
494 494
495 495 fp.close()
496 496
497 497 self.invalidatecaches()
498 498
499 499 if '.hgtags' not in self.dirstate:
500 500 self[None].add(['.hgtags'])
501 501
502 502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 504
505 505 for name in names:
506 506 self.hook('tag', node=hex(node), tag=name, local=local)
507 507
508 508 return tagnode
509 509
510 510 def tag(self, names, node, message, local, user, date):
511 511 '''tag a revision with one or more symbolic names.
512 512
513 513 names is a list of strings or, when adding a single tag, names may be a
514 514 string.
515 515
516 516 if local is True, the tags are stored in a per-repository file.
517 517 otherwise, they are stored in the .hgtags file, and a new
518 518 changeset is committed with the change.
519 519
520 520 keyword arguments:
521 521
522 522 local: whether to store tags in non-version-controlled file
523 523 (default False)
524 524
525 525 message: commit message to use if committing
526 526
527 527 user: name of user to use if committing
528 528
529 529 date: date tuple to use if committing'''
530 530
531 531 if not local:
532 532 for x in self.status()[:5]:
533 533 if '.hgtags' in x:
534 534 raise util.Abort(_('working copy of .hgtags is changed '
535 535 '(please commit .hgtags manually)'))
536 536
537 537 self.tags() # instantiate the cache
538 538 self._tag(names, node, message, local, user, date)
539 539
540 540 @filteredpropertycache
541 541 def _tagscache(self):
542 542 '''Returns a tagscache object that contains various tags-related
543 543 caches.'''
544 544
545 545 # This simplifies its cache management by having one decorated
546 546 # function (this one) and the rest simply fetch things from it.
547 547 class tagscache(object):
548 548 def __init__(self):
549 549 # These two define the set of tags for this repository. tags
550 550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 551 # 'local'. (Global tags are defined by .hgtags across all
552 552 # heads, and local tags are defined in .hg/localtags.)
553 553 # They constitute the in-memory cache of tags.
554 554 self.tags = self.tagtypes = None
555 555
556 556 self.nodetagscache = self.tagslist = None
557 557
558 558 cache = tagscache()
559 559 cache.tags, cache.tagtypes = self._findtags()
560 560
561 561 return cache
562 562
563 563 def tags(self):
564 564 '''return a mapping of tag to node'''
565 565 t = {}
566 566 if self.changelog.filteredrevs:
567 567 tags, tt = self._findtags()
568 568 else:
569 569 tags = self._tagscache.tags
570 570 for k, v in tags.iteritems():
571 571 try:
572 572 # ignore tags to unknown nodes
573 573 self.changelog.rev(v)
574 574 t[k] = v
575 575 except (error.LookupError, ValueError):
576 576 pass
577 577 return t
578 578
579 579 def _findtags(self):
580 580 '''Do the hard work of finding tags. Return a pair of dicts
581 581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 582 maps tag name to a string like \'global\' or \'local\'.
583 583 Subclasses or extensions are free to add their own tags, but
584 584 should be aware that the returned dicts will be retained for the
585 585 duration of the localrepo object.'''
586 586
587 587 # XXX what tagtype should subclasses/extensions use? Currently
588 588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 589 # Should each extension invent its own tag type? Should there
590 590 # be one tagtype for all such "virtual" tags? Or is the status
591 591 # quo fine?
592 592
593 593 alltags = {} # map tag name to (node, hist)
594 594 tagtypes = {}
595 595
596 596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 598
599 599 # Build the return dicts. Have to re-encode tag names because
600 600 # the tags module always uses UTF-8 (in order not to lose info
601 601 # writing to the cache), but the rest of Mercurial wants them in
602 602 # local encoding.
603 603 tags = {}
604 604 for (name, (node, hist)) in alltags.iteritems():
605 605 if node != nullid:
606 606 tags[encoding.tolocal(name)] = node
607 607 tags['tip'] = self.changelog.tip()
608 608 tagtypes = dict([(encoding.tolocal(name), value)
609 609 for (name, value) in tagtypes.iteritems()])
610 610 return (tags, tagtypes)
611 611
612 612 def tagtype(self, tagname):
613 613 '''
614 614 return the type of the given tag. result can be:
615 615
616 616 'local' : a local tag
617 617 'global' : a global tag
618 618 None : tag does not exist
619 619 '''
620 620
621 621 return self._tagscache.tagtypes.get(tagname)
622 622
623 623 def tagslist(self):
624 624 '''return a list of tags ordered by revision'''
625 625 if not self._tagscache.tagslist:
626 626 l = []
627 627 for t, n in self.tags().iteritems():
628 628 r = self.changelog.rev(n)
629 629 l.append((r, t, n))
630 630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 631
632 632 return self._tagscache.tagslist
633 633
634 634 def nodetags(self, node):
635 635 '''return the tags associated with a node'''
636 636 if not self._tagscache.nodetagscache:
637 637 nodetagscache = {}
638 638 for t, n in self._tagscache.tags.iteritems():
639 639 nodetagscache.setdefault(n, []).append(t)
640 640 for tags in nodetagscache.itervalues():
641 641 tags.sort()
642 642 self._tagscache.nodetagscache = nodetagscache
643 643 return self._tagscache.nodetagscache.get(node, [])
644 644
645 645 def nodebookmarks(self, node):
646 646 marks = []
647 647 for bookmark, n in self._bookmarks.iteritems():
648 648 if n == node:
649 649 marks.append(bookmark)
650 650 return sorted(marks)
651 651
652 652 def _cacheabletip(self):
653 653 """tip-most revision stable enought to used in persistent cache
654 654
655 655 This function is overwritten by MQ to ensure we do not write cache for
656 656 a part of the history that will likely change.
657 657
658 658 Efficient handling of filtered revisions in branchcache should offer a
659 659 better alternative. But we are using this approach until it is ready.
660 660 """
661 661 cl = self.changelog
662 662 return cl.rev(cl.tip())
663 663
664 664 def branchmap(self):
665 665 '''returns a dictionary {branch: [branchheads]}'''
666 666 if self.changelog.filteredrevs:
667 667 # some changesets are excluded; we can't use the cache
668 668 bmap = branchmap.branchcache()
669 branchmap.update(self, bmap, (self[r] for r in self))
669 bmap.update(self, (self[r] for r in self))
670 670 return bmap
671 671 else:
672 672 branchmap.updatecache(self)
673 673 return self._branchcache
674 674
675 675
676 676 def _branchtip(self, heads):
677 677 '''return the tipmost branch head in heads'''
678 678 tip = heads[-1]
679 679 for h in reversed(heads):
680 680 if not self[h].closesbranch():
681 681 tip = h
682 682 break
683 683 return tip
684 684
685 685 def branchtip(self, branch):
686 686 '''return the tip node for a given branch'''
687 687 if branch not in self.branchmap():
688 688 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
689 689 return self._branchtip(self.branchmap()[branch])
690 690
691 691 def branchtags(self):
692 692 '''return a dict where branch names map to the tipmost head of
693 693 the branch, open heads come before closed'''
694 694 bt = {}
695 695 for bn, heads in self.branchmap().iteritems():
696 696 bt[bn] = self._branchtip(heads)
697 697 return bt
698 698
699 699 def lookup(self, key):
700 700 return self[key].node()
701 701
702 702 def lookupbranch(self, key, remote=None):
703 703 repo = remote or self
704 704 if key in repo.branchmap():
705 705 return key
706 706
707 707 repo = (remote and remote.local()) and remote or self
708 708 return repo[key].branch()
709 709
710 710 def known(self, nodes):
711 711 nm = self.changelog.nodemap
712 712 pc = self._phasecache
713 713 result = []
714 714 for n in nodes:
715 715 r = nm.get(n)
716 716 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 717 result.append(resp)
718 718 return result
719 719
720 720 def local(self):
721 721 return self
722 722
723 723 def cancopy(self):
724 724 return self.local() # so statichttprepo's override of local() works
725 725
726 726 def join(self, f):
727 727 return os.path.join(self.path, f)
728 728
729 729 def wjoin(self, f):
730 730 return os.path.join(self.root, f)
731 731
732 732 def file(self, f):
733 733 if f[0] == '/':
734 734 f = f[1:]
735 735 return filelog.filelog(self.sopener, f)
736 736
737 737 def changectx(self, changeid):
738 738 return self[changeid]
739 739
740 740 def parents(self, changeid=None):
741 741 '''get list of changectxs for parents of changeid'''
742 742 return self[changeid].parents()
743 743
744 744 def setparents(self, p1, p2=nullid):
745 745 copies = self.dirstate.setparents(p1, p2)
746 746 if copies:
747 747 # Adjust copy records, the dirstate cannot do it, it
748 748 # requires access to parents manifests. Preserve them
749 749 # only for entries added to first parent.
750 750 pctx = self[p1]
751 751 for f in copies:
752 752 if f not in pctx and copies[f] in pctx:
753 753 self.dirstate.copy(copies[f], f)
754 754
755 755 def filectx(self, path, changeid=None, fileid=None):
756 756 """changeid can be a changeset revision, node, or tag.
757 757 fileid can be a file revision or node."""
758 758 return context.filectx(self, path, changeid, fileid)
759 759
760 760 def getcwd(self):
761 761 return self.dirstate.getcwd()
762 762
763 763 def pathto(self, f, cwd=None):
764 764 return self.dirstate.pathto(f, cwd)
765 765
766 766 def wfile(self, f, mode='r'):
767 767 return self.wopener(f, mode)
768 768
769 769 def _link(self, f):
770 770 return os.path.islink(self.wjoin(f))
771 771
772 772 def _loadfilter(self, filter):
773 773 if filter not in self.filterpats:
774 774 l = []
775 775 for pat, cmd in self.ui.configitems(filter):
776 776 if cmd == '!':
777 777 continue
778 778 mf = matchmod.match(self.root, '', [pat])
779 779 fn = None
780 780 params = cmd
781 781 for name, filterfn in self._datafilters.iteritems():
782 782 if cmd.startswith(name):
783 783 fn = filterfn
784 784 params = cmd[len(name):].lstrip()
785 785 break
786 786 if not fn:
787 787 fn = lambda s, c, **kwargs: util.filter(s, c)
788 788 # Wrap old filters not supporting keyword arguments
789 789 if not inspect.getargspec(fn)[2]:
790 790 oldfn = fn
791 791 fn = lambda s, c, **kwargs: oldfn(s, c)
792 792 l.append((mf, fn, params))
793 793 self.filterpats[filter] = l
794 794 return self.filterpats[filter]
795 795
796 796 def _filter(self, filterpats, filename, data):
797 797 for mf, fn, cmd in filterpats:
798 798 if mf(filename):
799 799 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
800 800 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
801 801 break
802 802
803 803 return data
804 804
805 805 @unfilteredpropertycache
806 806 def _encodefilterpats(self):
807 807 return self._loadfilter('encode')
808 808
809 809 @unfilteredpropertycache
810 810 def _decodefilterpats(self):
811 811 return self._loadfilter('decode')
812 812
813 813 def adddatafilter(self, name, filter):
814 814 self._datafilters[name] = filter
815 815
816 816 def wread(self, filename):
817 817 if self._link(filename):
818 818 data = os.readlink(self.wjoin(filename))
819 819 else:
820 820 data = self.wopener.read(filename)
821 821 return self._filter(self._encodefilterpats, filename, data)
822 822
823 823 def wwrite(self, filename, data, flags):
824 824 data = self._filter(self._decodefilterpats, filename, data)
825 825 if 'l' in flags:
826 826 self.wopener.symlink(data, filename)
827 827 else:
828 828 self.wopener.write(filename, data)
829 829 if 'x' in flags:
830 830 util.setflags(self.wjoin(filename), False, True)
831 831
832 832 def wwritedata(self, filename, data):
833 833 return self._filter(self._decodefilterpats, filename, data)
834 834
835 835 def transaction(self, desc):
836 836 tr = self._transref and self._transref() or None
837 837 if tr and tr.running():
838 838 return tr.nest()
839 839
840 840 # abort here if the journal already exists
841 841 if os.path.exists(self.sjoin("journal")):
842 842 raise error.RepoError(
843 843 _("abandoned transaction found - run hg recover"))
844 844
845 845 self._writejournal(desc)
846 846 renames = [(x, undoname(x)) for x in self._journalfiles()]
847 847
848 848 tr = transaction.transaction(self.ui.warn, self.sopener,
849 849 self.sjoin("journal"),
850 850 aftertrans(renames),
851 851 self.store.createmode)
852 852 self._transref = weakref.ref(tr)
853 853 return tr
854 854
855 855 def _journalfiles(self):
856 856 return (self.sjoin('journal'), self.join('journal.dirstate'),
857 857 self.join('journal.branch'), self.join('journal.desc'),
858 858 self.join('journal.bookmarks'),
859 859 self.sjoin('journal.phaseroots'))
860 860
861 861 def undofiles(self):
862 862 return [undoname(x) for x in self._journalfiles()]
863 863
864 864 def _writejournal(self, desc):
865 865 self.opener.write("journal.dirstate",
866 866 self.opener.tryread("dirstate"))
867 867 self.opener.write("journal.branch",
868 868 encoding.fromlocal(self.dirstate.branch()))
869 869 self.opener.write("journal.desc",
870 870 "%d\n%s\n" % (len(self), desc))
871 871 self.opener.write("journal.bookmarks",
872 872 self.opener.tryread("bookmarks"))
873 873 self.sopener.write("journal.phaseroots",
874 874 self.sopener.tryread("phaseroots"))
875 875
876 876 def recover(self):
877 877 lock = self.lock()
878 878 try:
879 879 if os.path.exists(self.sjoin("journal")):
880 880 self.ui.status(_("rolling back interrupted transaction\n"))
881 881 transaction.rollback(self.sopener, self.sjoin("journal"),
882 882 self.ui.warn)
883 883 self.invalidate()
884 884 return True
885 885 else:
886 886 self.ui.warn(_("no interrupted transaction available\n"))
887 887 return False
888 888 finally:
889 889 lock.release()
890 890
891 891 def rollback(self, dryrun=False, force=False):
892 892 wlock = lock = None
893 893 try:
894 894 wlock = self.wlock()
895 895 lock = self.lock()
896 896 if os.path.exists(self.sjoin("undo")):
897 897 return self._rollback(dryrun, force)
898 898 else:
899 899 self.ui.warn(_("no rollback information available\n"))
900 900 return 1
901 901 finally:
902 902 release(lock, wlock)
903 903
904 904 @unfilteredmethod # Until we get smarter cache management
905 905 def _rollback(self, dryrun, force):
906 906 ui = self.ui
907 907 try:
908 908 args = self.opener.read('undo.desc').splitlines()
909 909 (oldlen, desc, detail) = (int(args[0]), args[1], None)
910 910 if len(args) >= 3:
911 911 detail = args[2]
912 912 oldtip = oldlen - 1
913 913
914 914 if detail and ui.verbose:
915 915 msg = (_('repository tip rolled back to revision %s'
916 916 ' (undo %s: %s)\n')
917 917 % (oldtip, desc, detail))
918 918 else:
919 919 msg = (_('repository tip rolled back to revision %s'
920 920 ' (undo %s)\n')
921 921 % (oldtip, desc))
922 922 except IOError:
923 923 msg = _('rolling back unknown transaction\n')
924 924 desc = None
925 925
926 926 if not force and self['.'] != self['tip'] and desc == 'commit':
927 927 raise util.Abort(
928 928 _('rollback of last commit while not checked out '
929 929 'may lose data'), hint=_('use -f to force'))
930 930
931 931 ui.status(msg)
932 932 if dryrun:
933 933 return 0
934 934
935 935 parents = self.dirstate.parents()
936 936 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
937 937 if os.path.exists(self.join('undo.bookmarks')):
938 938 util.rename(self.join('undo.bookmarks'),
939 939 self.join('bookmarks'))
940 940 if os.path.exists(self.sjoin('undo.phaseroots')):
941 941 util.rename(self.sjoin('undo.phaseroots'),
942 942 self.sjoin('phaseroots'))
943 943 self.invalidate()
944 944
945 945 # Discard all cache entries to force reloading everything.
946 946 self._filecache.clear()
947 947
948 948 parentgone = (parents[0] not in self.changelog.nodemap or
949 949 parents[1] not in self.changelog.nodemap)
950 950 if parentgone:
951 951 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
952 952 try:
953 953 branch = self.opener.read('undo.branch')
954 954 self.dirstate.setbranch(encoding.tolocal(branch))
955 955 except IOError:
956 956 ui.warn(_('named branch could not be reset: '
957 957 'current branch is still \'%s\'\n')
958 958 % self.dirstate.branch())
959 959
960 960 self.dirstate.invalidate()
961 961 parents = tuple([p.rev() for p in self.parents()])
962 962 if len(parents) > 1:
963 963 ui.status(_('working directory now based on '
964 964 'revisions %d and %d\n') % parents)
965 965 else:
966 966 ui.status(_('working directory now based on '
967 967 'revision %d\n') % parents)
968 968 # TODO: if we know which new heads may result from this rollback, pass
969 969 # them to destroy(), which will prevent the branchhead cache from being
970 970 # invalidated.
971 971 self.destroyed()
972 972 return 0
973 973
974 974 def invalidatecaches(self):
975 975
976 976 if '_tagscache' in vars(self):
977 977 # can't use delattr on proxy
978 978 del self.__dict__['_tagscache']
979 979
980 980 self.unfiltered()._branchcache = None # in UTF-8
981 981 self.invalidatevolatilesets()
982 982
983 983 def invalidatevolatilesets(self):
984 984 self.filteredrevcache.clear()
985 985 obsolete.clearobscaches(self)
986 986 if 'hiddenrevs' in vars(self):
987 987 del self.hiddenrevs
988 988
989 989 def invalidatedirstate(self):
990 990 '''Invalidates the dirstate, causing the next call to dirstate
991 991 to check if it was modified since the last time it was read,
992 992 rereading it if it has.
993 993
994 994 This differs from dirstate.invalidate() in that it does not
995 995 always reread the dirstate. Use dirstate.invalidate() if you
996 996 want to explicitly reread the dirstate (i.e. to restore it to a
997 997 previously known good state).'''
998 998 if hasunfilteredcache(self, 'dirstate'):
999 999 for k in self.dirstate._filecache:
1000 1000 try:
1001 1001 delattr(self.dirstate, k)
1002 1002 except AttributeError:
1003 1003 pass
1004 1004 delattr(self.unfiltered(), 'dirstate')
1005 1005
1006 1006 def invalidate(self):
1007 1007 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1008 1008 for k in self._filecache:
1009 1009 # dirstate is invalidated separately in invalidatedirstate()
1010 1010 if k == 'dirstate':
1011 1011 continue
1012 1012
1013 1013 try:
1014 1014 delattr(unfiltered, k)
1015 1015 except AttributeError:
1016 1016 pass
1017 1017 self.invalidatecaches()
1018 1018
1019 1019 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1020 1020 try:
1021 1021 l = lock.lock(lockname, 0, releasefn, desc=desc)
1022 1022 except error.LockHeld, inst:
1023 1023 if not wait:
1024 1024 raise
1025 1025 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1026 1026 (desc, inst.locker))
1027 1027 # default to 600 seconds timeout
1028 1028 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1029 1029 releasefn, desc=desc)
1030 1030 if acquirefn:
1031 1031 acquirefn()
1032 1032 return l
1033 1033
1034 1034 def _afterlock(self, callback):
1035 1035 """add a callback to the current repository lock.
1036 1036
1037 1037 The callback will be executed on lock release."""
1038 1038 l = self._lockref and self._lockref()
1039 1039 if l:
1040 1040 l.postrelease.append(callback)
1041 1041 else:
1042 1042 callback()
1043 1043
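# A minimal sketch (illustrative, not from this file) of _afterlock():
# the callback fires when the current store lock is released, or right
# away if no lock is held -- commit() below uses this for its "commit"
# hook:
#
#   def notify():
#       repo.ui.status("store lock released\n")
#   repo._afterlock(notify)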
1044 1044 def lock(self, wait=True):
1045 1045 '''Lock the repository store (.hg/store) and return a weak reference
1046 1046 to the lock. Use this before modifying the store (e.g. committing or
1047 1047 stripping). If you are opening a transaction, get a lock as well.'''
1048 1048 l = self._lockref and self._lockref()
1049 1049 if l is not None and l.held:
1050 1050 l.lock()
1051 1051 return l
1052 1052
1053 1053 def unlock():
1054 1054 self.store.write()
1055 1055 if hasunfilteredcache(self, '_phasecache'):
1056 1056 self._phasecache.write()
1057 1057 for k, ce in self._filecache.items():
1058 1058 if k == 'dirstate':
1059 1059 continue
1060 1060 ce.refresh()
1061 1061
1062 1062 l = self._lock(self.sjoin("lock"), wait, unlock,
1063 1063 self.invalidate, _('repository %s') % self.origroot)
1064 1064 self._lockref = weakref.ref(l)
1065 1065 return l
1066 1066
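# Conventional lock ordering (illustrative sketch, not from this file):
# take wlock() before lock() and release in the reverse order, as
# rollback() above does:
#
#   wlock = repo.wlock()
#   try:
#       lock = repo.lock()
#       try:
#           pass  # modify the store here
#       finally:
#           lock.release()
#   finally:
#       wlock.release()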
1067 1067 def wlock(self, wait=True):
1068 1068 '''Lock the non-store parts of the repository (everything under
1069 1069 .hg except .hg/store) and return a weak reference to the lock.
1070 1070 Use this before modifying files in .hg.'''
1071 1071 l = self._wlockref and self._wlockref()
1072 1072 if l is not None and l.held:
1073 1073 l.lock()
1074 1074 return l
1075 1075
1076 1076 def unlock():
1077 1077 self.dirstate.write()
1078 1078 ce = self._filecache.get('dirstate')
1079 1079 if ce:
1080 1080 ce.refresh()
1081 1081
1082 1082 l = self._lock(self.join("wlock"), wait, unlock,
1083 1083 self.invalidatedirstate, _('working directory of %s') %
1084 1084 self.origroot)
1085 1085 self._wlockref = weakref.ref(l)
1086 1086 return l
1087 1087
1088 1088 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1089 1089 """
1090 1090 commit an individual file as part of a larger transaction
1091 1091 """
1092 1092
1093 1093 fname = fctx.path()
1094 1094 text = fctx.data()
1095 1095 flog = self.file(fname)
1096 1096 fparent1 = manifest1.get(fname, nullid)
1097 1097 fparent2 = fparent2o = manifest2.get(fname, nullid)
1098 1098
1099 1099 meta = {}
1100 1100 copy = fctx.renamed()
1101 1101 if copy and copy[0] != fname:
1102 1102 # Mark the new revision of this file as a copy of another
1103 1103 # file. This copy data will effectively act as a parent
1104 1104 # of this new revision. If this is a merge, the first
1105 1105 # parent will be the nullid (meaning "look up the copy data")
1106 1106 # and the second one will be the other parent. For example:
1107 1107 #
1108 1108 # 0 --- 1 --- 3 rev1 changes file foo
1109 1109 # \ / rev2 renames foo to bar and changes it
1110 1110 # \- 2 -/ rev3 should have bar with all changes and
1111 1111 # should record that bar descends from
1112 1112 # bar in rev2 and foo in rev1
1113 1113 #
1114 1114 # this allows this merge to succeed:
1115 1115 #
1116 1116 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1117 1117 # \ / merging rev3 and rev4 should use bar@rev2
1118 1118 # \- 2 --- 4 as the merge base
1119 1119 #
1120 1120
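# For example (hypothetical values), committing bar as a rename of
# foo records copy metadata roughly like:
#
#   meta = {"copy": "foo", "copyrev": "<40-digit hex filenode of foo>"}
#
# with fparent1 set to nullid so readers know to look up the copy data.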
1121 1121 cfname = copy[0]
1122 1122 crev = manifest1.get(cfname)
1123 1123 newfparent = fparent2
1124 1124
1125 1125 if manifest2: # branch merge
1126 1126 if fparent2 == nullid or crev is None: # copied on remote side
1127 1127 if cfname in manifest2:
1128 1128 crev = manifest2[cfname]
1129 1129 newfparent = fparent1
1130 1130
1131 1131 # find source in nearest ancestor if we've lost track
1132 1132 if not crev:
1133 1133 self.ui.debug(" %s: searching for copy revision for %s\n" %
1134 1134 (fname, cfname))
1135 1135 for ancestor in self[None].ancestors():
1136 1136 if cfname in ancestor:
1137 1137 crev = ancestor[cfname].filenode()
1138 1138 break
1139 1139
1140 1140 if crev:
1141 1141 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1142 1142 meta["copy"] = cfname
1143 1143 meta["copyrev"] = hex(crev)
1144 1144 fparent1, fparent2 = nullid, newfparent
1145 1145 else:
1146 1146 self.ui.warn(_("warning: can't find ancestor for '%s' "
1147 1147 "copied from '%s'!\n") % (fname, cfname))
1148 1148
1149 1149 elif fparent2 != nullid:
1150 1150 # is one parent an ancestor of the other?
1151 1151 fparentancestor = flog.ancestor(fparent1, fparent2)
1152 1152 if fparentancestor == fparent1:
1153 1153 fparent1, fparent2 = fparent2, nullid
1154 1154 elif fparentancestor == fparent2:
1155 1155 fparent2 = nullid
1156 1156
1157 1157 # is the file changed?
1158 1158 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1159 1159 changelist.append(fname)
1160 1160 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1161 1161
1162 1162 # are just the flags changed during merge?
1163 1163 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1164 1164 changelist.append(fname)
1165 1165
1166 1166 return fparent1
1167 1167
1168 1168 @unfilteredmethod
1169 1169 def commit(self, text="", user=None, date=None, match=None, force=False,
1170 1170 editor=False, extra={}):
1171 1171 """Add a new revision to current repository.
1172 1172
1173 1173 Revision information is gathered from the working directory,
1174 1174 match can be used to filter the committed files. If editor is
1175 1175 supplied, it is called to get a commit message.
1176 1176 """
1177 1177
1178 1178 def fail(f, msg):
1179 1179 raise util.Abort('%s: %s' % (f, msg))
1180 1180
1181 1181 if not match:
1182 1182 match = matchmod.always(self.root, '')
1183 1183
1184 1184 if not force:
1185 1185 vdirs = []
1186 1186 match.dir = vdirs.append
1187 1187 match.bad = fail
1188 1188
1189 1189 wlock = self.wlock()
1190 1190 try:
1191 1191 wctx = self[None]
1192 1192 merge = len(wctx.parents()) > 1
1193 1193
1194 1194 if (not force and merge and match and
1195 1195 (match.files() or match.anypats())):
1196 1196 raise util.Abort(_('cannot partially commit a merge '
1197 1197 '(do not specify files or patterns)'))
1198 1198
1199 1199 changes = self.status(match=match, clean=force)
1200 1200 if force:
1201 1201 changes[0].extend(changes[6]) # mq may commit unchanged files
1202 1202
1203 1203 # check subrepos
1204 1204 subs = []
1205 1205 commitsubs = set()
1206 1206 newstate = wctx.substate.copy()
1207 1207 # only manage subrepos and .hgsubstate if .hgsub is present
1208 1208 if '.hgsub' in wctx:
1209 1209 # we'll decide whether to track this ourselves, thanks
1210 1210 if '.hgsubstate' in changes[0]:
1211 1211 changes[0].remove('.hgsubstate')
1212 1212 if '.hgsubstate' in changes[2]:
1213 1213 changes[2].remove('.hgsubstate')
1214 1214
1215 1215 # compare current state to last committed state
1216 1216 # build new substate based on last committed state
1217 1217 oldstate = wctx.p1().substate
1218 1218 for s in sorted(newstate.keys()):
1219 1219 if not match(s):
1220 1220 # ignore working copy, use old state if present
1221 1221 if s in oldstate:
1222 1222 newstate[s] = oldstate[s]
1223 1223 continue
1224 1224 if not force:
1225 1225 raise util.Abort(
1226 1226 _("commit with new subrepo %s excluded") % s)
1227 1227 if wctx.sub(s).dirty(True):
1228 1228 if not self.ui.configbool('ui', 'commitsubrepos'):
1229 1229 raise util.Abort(
1230 1230 _("uncommitted changes in subrepo %s") % s,
1231 1231 hint=_("use --subrepos for recursive commit"))
1232 1232 subs.append(s)
1233 1233 commitsubs.add(s)
1234 1234 else:
1235 1235 bs = wctx.sub(s).basestate()
1236 1236 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1237 1237 if oldstate.get(s, (None, None, None))[1] != bs:
1238 1238 subs.append(s)
1239 1239
1240 1240 # check for removed subrepos
1241 1241 for p in wctx.parents():
1242 1242 r = [s for s in p.substate if s not in newstate]
1243 1243 subs += [s for s in r if match(s)]
1244 1244 if subs:
1245 1245 if (not match('.hgsub') and
1246 1246 '.hgsub' in (wctx.modified() + wctx.added())):
1247 1247 raise util.Abort(
1248 1248 _("can't commit subrepos without .hgsub"))
1249 1249 changes[0].insert(0, '.hgsubstate')
1250 1250
1251 1251 elif '.hgsub' in changes[2]:
1252 1252 # clean up .hgsubstate when .hgsub is removed
1253 1253 if ('.hgsubstate' in wctx and
1254 1254 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1255 1255 changes[2].insert(0, '.hgsubstate')
1256 1256
1257 1257 # make sure all explicit patterns are matched
1258 1258 if not force and match.files():
1259 1259 matched = set(changes[0] + changes[1] + changes[2])
1260 1260
1261 1261 for f in match.files():
1262 1262 f = self.dirstate.normalize(f)
1263 1263 if f == '.' or f in matched or f in wctx.substate:
1264 1264 continue
1265 1265 if f in changes[3]: # missing
1266 1266 fail(f, _('file not found!'))
1267 1267 if f in vdirs: # visited directory
1268 1268 d = f + '/'
1269 1269 for mf in matched:
1270 1270 if mf.startswith(d):
1271 1271 break
1272 1272 else:
1273 1273 fail(f, _("no match under directory!"))
1274 1274 elif f not in self.dirstate:
1275 1275 fail(f, _("file not tracked!"))
1276 1276
1277 1277 if (not force and not extra.get("close") and not merge
1278 1278 and not (changes[0] or changes[1] or changes[2])
1279 1279 and wctx.branch() == wctx.p1().branch()):
1280 1280 return None
1281 1281
1282 1282 if merge and changes[3]:
1283 1283 raise util.Abort(_("cannot commit merge with missing files"))
1284 1284
1285 1285 ms = mergemod.mergestate(self)
1286 1286 for f in changes[0]:
1287 1287 if f in ms and ms[f] == 'u':
1288 1288 raise util.Abort(_("unresolved merge conflicts "
1289 1289 "(see hg help resolve)"))
1290 1290
1291 1291 cctx = context.workingctx(self, text, user, date, extra, changes)
1292 1292 if editor:
1293 1293 cctx._text = editor(self, cctx, subs)
1294 1294 edited = (text != cctx._text)
1295 1295
1296 1296 # commit subs and write new state
1297 1297 if subs:
1298 1298 for s in sorted(commitsubs):
1299 1299 sub = wctx.sub(s)
1300 1300 self.ui.status(_('committing subrepository %s\n') %
1301 1301 subrepo.subrelpath(sub))
1302 1302 sr = sub.commit(cctx._text, user, date)
1303 1303 newstate[s] = (newstate[s][0], sr)
1304 1304 subrepo.writestate(self, newstate)
1305 1305
1306 1306 # Save commit message in case this transaction gets rolled back
1307 1307 # (e.g. by a pretxncommit hook). Leave the content alone on
1308 1308 # the assumption that the user will use the same editor again.
1309 1309 msgfn = self.savecommitmessage(cctx._text)
1310 1310
1311 1311 p1, p2 = self.dirstate.parents()
1312 1312 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1313 1313 try:
1314 1314 self.hook("precommit", throw=True, parent1=hookp1,
1315 1315 parent2=hookp2)
1316 1316 ret = self.commitctx(cctx, True)
1317 1317 except: # re-raises
1318 1318 if edited:
1319 1319 self.ui.write(
1320 1320 _('note: commit message saved in %s\n') % msgfn)
1321 1321 raise
1322 1322
1323 1323 # update bookmarks, dirstate and mergestate
1324 1324 bookmarks.update(self, [p1, p2], ret)
1325 1325 for f in changes[0] + changes[1]:
1326 1326 self.dirstate.normal(f)
1327 1327 for f in changes[2]:
1328 1328 self.dirstate.drop(f)
1329 1329 self.dirstate.setparents(ret)
1330 1330 ms.reset()
1331 1331 finally:
1332 1332 wlock.release()
1333 1333
1334 1334 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1335 1335 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1336 1336 self._afterlock(commithook)
1337 1337 return ret
1338 1338
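# A minimal programmatic commit (illustrative sketch, not from this
# file), assuming `repo` is a localrepository; the return value is the
# new changeset node, or None when there was nothing to commit:
#
#   node = repo.commit(text="fix typo", user="alice <a@example.com>")
#   if node is None:
#       repo.ui.status("nothing changed\n")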
1339 1339 @unfilteredmethod
1340 1340 def commitctx(self, ctx, error=False):
1341 1341 """Add a new revision to current repository.
1342 1342 Revision information is passed via the context argument.
1343 1343 """
1344 1344
1345 1345 tr = lock = None
1346 1346 removed = list(ctx.removed())
1347 1347 p1, p2 = ctx.p1(), ctx.p2()
1348 1348 user = ctx.user()
1349 1349
1350 1350 lock = self.lock()
1351 1351 try:
1352 1352 tr = self.transaction("commit")
1353 1353 trp = weakref.proxy(tr)
1354 1354
1355 1355 if ctx.files():
1356 1356 m1 = p1.manifest().copy()
1357 1357 m2 = p2.manifest()
1358 1358
1359 1359 # check in files
1360 1360 new = {}
1361 1361 changed = []
1362 1362 linkrev = len(self)
1363 1363 for f in sorted(ctx.modified() + ctx.added()):
1364 1364 self.ui.note(f + "\n")
1365 1365 try:
1366 1366 fctx = ctx[f]
1367 1367 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1368 1368 changed)
1369 1369 m1.set(f, fctx.flags())
1370 1370 except OSError, inst:
1371 1371 self.ui.warn(_("trouble committing %s!\n") % f)
1372 1372 raise
1373 1373 except IOError, inst:
1374 1374 errcode = getattr(inst, 'errno', errno.ENOENT)
1375 1375 if error or errcode and errcode != errno.ENOENT:
1376 1376 self.ui.warn(_("trouble committing %s!\n") % f)
1377 1377 raise
1378 1378 else:
1379 1379 removed.append(f)
1380 1380
1381 1381 # update manifest
1382 1382 m1.update(new)
1383 1383 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1384 1384 drop = [f for f in removed if f in m1]
1385 1385 for f in drop:
1386 1386 del m1[f]
1387 1387 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1388 1388 p2.manifestnode(), (new, drop))
1389 1389 files = changed + removed
1390 1390 else:
1391 1391 mn = p1.manifestnode()
1392 1392 files = []
1393 1393
1394 1394 # update changelog
1395 1395 self.changelog.delayupdate()
1396 1396 n = self.changelog.add(mn, files, ctx.description(),
1397 1397 trp, p1.node(), p2.node(),
1398 1398 user, ctx.date(), ctx.extra().copy())
1399 1399 p = lambda: self.changelog.writepending() and self.root or ""
1400 1400 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1401 1401 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1402 1402 parent2=xp2, pending=p)
1403 1403 self.changelog.finalize(trp)
1404 1404 # set the new commit in its proper phase
1405 1405 targetphase = phases.newcommitphase(self.ui)
1406 1406 if targetphase:
1407 1407 # retracting the boundary does not alter parent changesets.
1408 1408 # if a parent has a higher phase, the resulting phase will
1409 1409 # be compliant anyway
1410 1410 #
1411 1411 # if the minimal phase was 0 we don't need to retract anything
1412 1412 phases.retractboundary(self, targetphase, [n])
1413 1413 tr.close()
1414 1414 branchmap.updatecache(self)
1415 1415 return n
1416 1416 finally:
1417 1417 if tr:
1418 1418 tr.release()
1419 1419 lock.release()
1420 1420
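# commit() above funnels a workingctx through commitctx(); a caller can
# likewise pass a memctx (see mercurial.context) to create a changeset
# without touching the working directory. An assumed sketch:
#
#   ctx = context.memctx(repo, (p1node, p2node), "message", files,
#                        filectxfn, user="alice", date=None)
#   node = repo.commitctx(ctx)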
1421 1421 @unfilteredmethod
1422 1422 def destroyed(self, newheadnodes=None):
1423 1423 '''Inform the repository that nodes have been destroyed.
1424 1424 Intended for use by strip and rollback, so there's a common
1425 1425 place for anything that has to be done after destroying history.
1426 1426
1427 1427 If you know the branchhead cache was up to date before nodes were removed
1428 1428 and you also know the set of candidate new heads that may have resulted
1429 1429 from the destruction, you can set newheadnodes. This will enable the
1430 1430 code to update the branchheads cache, rather than having future code
1431 1431 decide it's invalid and regenerate it from scratch.
1432 1432 '''
1433 1433 # If we have info (newheadnodes) on how to update the branch cache, do
1434 1434 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1435 1435 # will be caught the next time it is read.
1436 1436 if newheadnodes:
1437 1437 ctxgen = (self[node] for node in newheadnodes
1438 1438 if self.changelog.hasnode(node))
1439 1439 cache = self._branchcache
1440 branchmap.update(self, cache, ctxgen)
1440 cache.update(self, ctxgen)
1441 1441 cache.write(self)
1442 1442
1443 1443 # Ensure the persistent tag cache is updated. Doing it now
1444 1444 # means that the tag cache only has to worry about destroyed
1445 1445 # heads immediately after a strip/rollback. That in turn
1446 1446 # guarantees that "cachetip == currenttip" (comparing both rev
1447 1447 # and node) always means no nodes have been added or destroyed.
1448 1448
1449 1449 # XXX this is suboptimal when qrefresh'ing: we strip the current
1450 1450 # head, refresh the tag cache, then immediately add a new head.
1451 1451 # But I think doing it this way is necessary for the "instant
1452 1452 # tag cache retrieval" case to work.
1453 1453 self.invalidatecaches()
1454 1454
1455 1455 # Discard all cache entries to force reloading everything.
1456 1456 self._filecache.clear()
1457 1457
1458 1458 def walk(self, match, node=None):
1459 1459 '''
1460 1460 walk recursively through the directory tree or a given
1461 1461 changeset, finding all files matched by the match
1462 1462 function
1463 1463 '''
1464 1464 return self[node].walk(match)
1465 1465
1466 1466 def status(self, node1='.', node2=None, match=None,
1467 1467 ignored=False, clean=False, unknown=False,
1468 1468 listsubrepos=False):
1469 1469 """return status of files between two nodes or node and working
1470 1470 directory.
1471 1471
1472 1472 If node1 is None, use the first dirstate parent instead.
1473 1473 If node2 is None, compare node1 with working directory.
1474 1474 """
1475 1475
1476 1476 def mfmatches(ctx):
1477 1477 mf = ctx.manifest().copy()
1478 1478 if match.always():
1479 1479 return mf
1480 1480 for fn in mf.keys():
1481 1481 if not match(fn):
1482 1482 del mf[fn]
1483 1483 return mf
1484 1484
1485 1485 if isinstance(node1, context.changectx):
1486 1486 ctx1 = node1
1487 1487 else:
1488 1488 ctx1 = self[node1]
1489 1489 if isinstance(node2, context.changectx):
1490 1490 ctx2 = node2
1491 1491 else:
1492 1492 ctx2 = self[node2]
1493 1493
1494 1494 working = ctx2.rev() is None
1495 1495 parentworking = working and ctx1 == self['.']
1496 1496 match = match or matchmod.always(self.root, self.getcwd())
1497 1497 listignored, listclean, listunknown = ignored, clean, unknown
1498 1498
1499 1499 # load earliest manifest first for caching reasons
1500 1500 if not working and ctx2.rev() < ctx1.rev():
1501 1501 ctx2.manifest()
1502 1502
1503 1503 if not parentworking:
1504 1504 def bad(f, msg):
1505 1505 # 'f' may be a directory pattern from 'match.files()',
1506 1506 # so 'f not in ctx1' is not enough
1507 1507 if f not in ctx1 and f not in ctx1.dirs():
1508 1508 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1509 1509 match.bad = bad
1510 1510
1511 1511 if working: # we need to scan the working dir
1512 1512 subrepos = []
1513 1513 if '.hgsub' in self.dirstate:
1514 1514 subrepos = ctx2.substate.keys()
1515 1515 s = self.dirstate.status(match, subrepos, listignored,
1516 1516 listclean, listunknown)
1517 1517 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1518 1518
1519 1519 # check for any possibly clean files
1520 1520 if parentworking and cmp:
1521 1521 fixup = []
1522 1522 # do a full compare of any files that might have changed
1523 1523 for f in sorted(cmp):
1524 1524 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1525 1525 or ctx1[f].cmp(ctx2[f])):
1526 1526 modified.append(f)
1527 1527 else:
1528 1528 fixup.append(f)
1529 1529
1530 1530 # update dirstate for files that are actually clean
1531 1531 if fixup:
1532 1532 if listclean:
1533 1533 clean += fixup
1534 1534
1535 1535 try:
1536 1536 # updating the dirstate is optional
1537 1537 # so we don't wait on the lock
1538 1538 wlock = self.wlock(False)
1539 1539 try:
1540 1540 for f in fixup:
1541 1541 self.dirstate.normal(f)
1542 1542 finally:
1543 1543 wlock.release()
1544 1544 except error.LockError:
1545 1545 pass
1546 1546
1547 1547 if not parentworking:
1548 1548 mf1 = mfmatches(ctx1)
1549 1549 if working:
1550 1550 # we are comparing working dir against non-parent
1551 1551 # generate a pseudo-manifest for the working dir
1552 1552 mf2 = mfmatches(self['.'])
1553 1553 for f in cmp + modified + added:
1554 1554 mf2[f] = None
1555 1555 mf2.set(f, ctx2.flags(f))
1556 1556 for f in removed:
1557 1557 if f in mf2:
1558 1558 del mf2[f]
1559 1559 else:
1560 1560 # we are comparing two revisions
1561 1561 deleted, unknown, ignored = [], [], []
1562 1562 mf2 = mfmatches(ctx2)
1563 1563
1564 1564 modified, added, clean = [], [], []
1565 1565 withflags = mf1.withflags() | mf2.withflags()
1566 1566 for fn in mf2:
1567 1567 if fn in mf1:
1568 1568 if (fn not in deleted and
1569 1569 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1570 1570 (mf1[fn] != mf2[fn] and
1571 1571 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1572 1572 modified.append(fn)
1573 1573 elif listclean:
1574 1574 clean.append(fn)
1575 1575 del mf1[fn]
1576 1576 elif fn not in deleted:
1577 1577 added.append(fn)
1578 1578 removed = mf1.keys()
1579 1579
1580 1580 if working and modified and not self.dirstate._checklink:
1581 1581 # Symlink placeholders may get non-symlink-like contents
1582 1582 # via user error or dereferencing by NFS or Samba servers,
1583 1583 # so we filter out any placeholders that don't look like a
1584 1584 # symlink
1585 1585 sane = []
1586 1586 for f in modified:
1587 1587 if ctx2.flags(f) == 'l':
1588 1588 d = ctx2[f].data()
1589 1589 if len(d) >= 1024 or '\n' in d or util.binary(d):
1590 1590 self.ui.debug('ignoring suspect symlink placeholder'
1591 1591 ' "%s"\n' % f)
1592 1592 continue
1593 1593 sane.append(f)
1594 1594 modified = sane
1595 1595
1596 1596 r = modified, added, removed, deleted, unknown, ignored, clean
1597 1597
1598 1598 if listsubrepos:
1599 1599 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1600 1600 if working:
1601 1601 rev2 = None
1602 1602 else:
1603 1603 rev2 = ctx2.substate[subpath][1]
1604 1604 try:
1605 1605 submatch = matchmod.narrowmatcher(subpath, match)
1606 1606 s = sub.status(rev2, match=submatch, ignored=listignored,
1607 1607 clean=listclean, unknown=listunknown,
1608 1608 listsubrepos=True)
1609 1609 for rfiles, sfiles in zip(r, s):
1610 1610 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1611 1611 except error.LookupError:
1612 1612 self.ui.status(_("skipping missing subrepository: %s\n")
1613 1613 % subpath)
1614 1614
1615 1615 for l in r:
1616 1616 l.sort()
1617 1617 return r
1618 1618
1619 1619 def heads(self, start=None):
1620 1620 heads = self.changelog.heads(start)
1621 1621 # sort the output in rev descending order
1622 1622 return sorted(heads, key=self.changelog.rev, reverse=True)
1623 1623
1624 1624 def branchheads(self, branch=None, start=None, closed=False):
1625 1625 '''return a (possibly filtered) list of heads for the given branch
1626 1626
1627 1627 Heads are returned in topological order, from newest to oldest.
1628 1628 If branch is None, use the dirstate branch.
1629 1629 If start is not None, return only heads reachable from start.
1630 1630 If closed is True, return heads that are marked as closed as well.
1631 1631 '''
1632 1632 if branch is None:
1633 1633 branch = self[None].branch()
1634 1634 branches = self.branchmap()
1635 1635 if branch not in branches:
1636 1636 return []
1637 1637 # the cache returns heads ordered lowest to highest
1638 1638 bheads = list(reversed(branches[branch]))
1639 1639 if start is not None:
1640 1640 # filter out the heads that cannot be reached from startrev
1641 1641 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1642 1642 bheads = [h for h in bheads if h in fbheads]
1643 1643 if not closed:
1644 1644 bheads = [h for h in bheads if not self[h].closesbranch()]
1645 1645 return bheads
1646 1646
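# Illustrative sketch (not from this file): listing the open heads of
# the 'default' branch, newest first:
#
#   for node in repo.branchheads('default'):
#       repo.ui.write("%s\n" % repo[node].hex())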
1647 1647 def branches(self, nodes):
1648 1648 if not nodes:
1649 1649 nodes = [self.changelog.tip()]
1650 1650 b = []
1651 1651 for n in nodes:
1652 1652 t = n
1653 1653 while True:
1654 1654 p = self.changelog.parents(n)
1655 1655 if p[1] != nullid or p[0] == nullid:
1656 1656 b.append((t, n, p[0], p[1]))
1657 1657 break
1658 1658 n = p[0]
1659 1659 return b
1660 1660
1661 1661 def between(self, pairs):
1662 1662 r = []
1663 1663
1664 1664 for top, bottom in pairs:
1665 1665 n, l, i = top, [], 0
1666 1666 f = 1
1667 1667
1668 1668 while n != bottom and n != nullid:
1669 1669 p = self.changelog.parents(n)[0]
1670 1670 if i == f:
1671 1671 l.append(n)
1672 1672 f = f * 2
1673 1673 n = p
1674 1674 i += 1
1675 1675
1676 1676 r.append(l)
1677 1677
1678 1678 return r
1679 1679
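# Worked example (illustrative): for a linear history with top at rev 10
# and bottom at rev 0, the loop above appends a node whenever i reaches
# f = 1, 2, 4, 8, ..., i.e. at exponentially growing distances from top,
# so l collects the nodes at revs 9, 8, 6 and 2. This keeps each answer
# O(log n), which is what the wire protocol's "between" command relies on.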
1680 1680 def pull(self, remote, heads=None, force=False):
1681 1681 # don't open a transaction for nothing or you break future useful
1682 1682 # rollback calls
1683 1683 tr = None
1684 1684 trname = 'pull\n' + util.hidepassword(remote.url())
1685 1685 lock = self.lock()
1686 1686 try:
1687 1687 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1688 1688 force=force)
1689 1689 common, fetch, rheads = tmp
1690 1690 if not fetch:
1691 1691 self.ui.status(_("no changes found\n"))
1692 1692 added = []
1693 1693 result = 0
1694 1694 else:
1695 1695 tr = self.transaction(trname)
1696 1696 if heads is None and list(common) == [nullid]:
1697 1697 self.ui.status(_("requesting all changes\n"))
1698 1698 elif heads is None and remote.capable('changegroupsubset'):
1699 1699 # issue1320, avoid a race if remote changed after discovery
1700 1700 heads = rheads
1701 1701
1702 1702 if remote.capable('getbundle'):
1703 1703 cg = remote.getbundle('pull', common=common,
1704 1704 heads=heads or rheads)
1705 1705 elif heads is None:
1706 1706 cg = remote.changegroup(fetch, 'pull')
1707 1707 elif not remote.capable('changegroupsubset'):
1708 1708 raise util.Abort(_("partial pull cannot be done because "
1709 1709 "other repository doesn't support "
1710 1710 "changegroupsubset."))
1711 1711 else:
1712 1712 cg = remote.changegroupsubset(fetch, heads, 'pull')
1713 1713 clstart = len(self.changelog)
1714 1714 result = self.addchangegroup(cg, 'pull', remote.url())
1715 1715 clend = len(self.changelog)
1716 1716 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1717 1717
1718 1718 # compute target subset
1719 1719 if heads is None:
1720 1720 # We pulled everything possible
1721 1721 # sync on everything common
1722 1722 subset = common + added
1723 1723 else:
1724 1724 # We pulled a specific subset
1725 1725 # sync on this subset
1726 1726 subset = heads
1727 1727
1728 1728 # Get remote phases data from remote
1729 1729 remotephases = remote.listkeys('phases')
1730 1730 publishing = bool(remotephases.get('publishing', False))
1731 1731 if remotephases and not publishing:
1732 1732 # remote is new and unpublishing
1733 1733 pheads, _dr = phases.analyzeremotephases(self, subset,
1734 1734 remotephases)
1735 1735 phases.advanceboundary(self, phases.public, pheads)
1736 1736 phases.advanceboundary(self, phases.draft, subset)
1737 1737 else:
1738 1738 # Remote is old or publishing; all common changesets
1739 1739 # should be seen as public
1740 1740 phases.advanceboundary(self, phases.public, subset)
1741 1741
1742 1742 if obsolete._enabled:
1743 1743 self.ui.debug('fetching remote obsolete markers\n')
1744 1744 remoteobs = remote.listkeys('obsolete')
1745 1745 if 'dump0' in remoteobs:
1746 1746 if tr is None:
1747 1747 tr = self.transaction(trname)
1748 1748 for key in sorted(remoteobs, reverse=True):
1749 1749 if key.startswith('dump'):
1750 1750 data = base85.b85decode(remoteobs[key])
1751 1751 self.obsstore.mergemarkers(tr, data)
1752 1752 self.invalidatevolatilesets()
1753 1753 if tr is not None:
1754 1754 tr.close()
1755 1755 finally:
1756 1756 if tr is not None:
1757 1757 tr.release()
1758 1758 lock.release()
1759 1759
1760 1760 return result
1761 1761
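# Illustrative sketch (not from this file), assuming `other` is a peer
# repository: pull() returns 0 when no changes were found, otherwise
# whatever addchangegroup() returned:
#
#   result = repo.pull(other, heads=None, force=False)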
1762 1762 def checkpush(self, force, revs):
1763 1763 """Extensions can override this function if additional checks have
1764 1764 to be performed before pushing, or call it if they override push
1765 1765 command.
1766 1766 """
1767 1767 pass
1768 1768
1769 1769 def push(self, remote, force=False, revs=None, newbranch=False):
1770 1770 '''Push outgoing changesets (limited by revs) from the current
1771 1771 repository to remote. Return an integer:
1772 1772 - None means nothing to push
1773 1773 - 0 means HTTP error
1774 1774 - 1 means we pushed and remote head count is unchanged *or*
1775 1775 we have outgoing changesets but refused to push
1776 1776 - other values as described by addchangegroup()
1777 1777 '''
1778 1778 # there are two ways to push to remote repo:
1779 1779 #
1780 1780 # addchangegroup assumes local user can lock remote
1781 1781 # repo (local filesystem, old ssh servers).
1782 1782 #
1783 1783 # unbundle assumes local user cannot lock remote repo (new ssh
1784 1784 # servers, http servers).
1785 1785
1786 1786 if not remote.canpush():
1787 1787 raise util.Abort(_("destination does not support push"))
1788 1788 unfi = self.unfiltered()
1789 1789 # get local lock as we might write phase data
1790 1790 locallock = self.lock()
1791 1791 try:
1792 1792 self.checkpush(force, revs)
1793 1793 lock = None
1794 1794 unbundle = remote.capable('unbundle')
1795 1795 if not unbundle:
1796 1796 lock = remote.lock()
1797 1797 try:
1798 1798 # discovery
1799 1799 fci = discovery.findcommonincoming
1800 1800 commoninc = fci(unfi, remote, force=force)
1801 1801 common, inc, remoteheads = commoninc
1802 1802 fco = discovery.findcommonoutgoing
1803 1803 outgoing = fco(unfi, remote, onlyheads=revs,
1804 1804 commoninc=commoninc, force=force)
1805 1805
1806 1806
1807 1807 if not outgoing.missing:
1808 1808 # nothing to push
1809 1809 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1810 1810 ret = None
1811 1811 else:
1812 1812 # something to push
1813 1813 if not force:
1814 1814 # if self.obsstore is empty --> no obsolete markers;
1815 1815 # checking it first saves the iteration below
1816 1816 if unfi.obsstore:
1817 1817 # these messages are here for the 80-char line limit
1818 1818 mso = _("push includes obsolete changeset: %s!")
1819 1819 msu = _("push includes unstable changeset: %s!")
1820 1820 msb = _("push includes bumped changeset: %s!")
1821 1821 msd = _("push includes divergent changeset: %s!")
1822 1822 # If there is at least one obsolete or unstable
1823 1823 # changeset in missing, then at least one of the
1824 1824 # missingheads will be obsolete or unstable. So
1825 1825 # checking heads only is ok
1826 1826 for node in outgoing.missingheads:
1827 1827 ctx = unfi[node]
1828 1828 if ctx.obsolete():
1829 1829 raise util.Abort(mso % ctx)
1830 1830 elif ctx.unstable():
1831 1831 raise util.Abort(msu % ctx)
1832 1832 elif ctx.bumped():
1833 1833 raise util.Abort(msb % ctx)
1834 1834 elif ctx.divergent():
1835 1835 raise util.Abort(msd % ctx)
1836 1836 discovery.checkheads(unfi, remote, outgoing,
1837 1837 remoteheads, newbranch,
1838 1838 bool(inc))
1839 1839
1840 1840 # create a changegroup from local
1841 1841 if revs is None and not outgoing.excluded:
1842 1842 # push everything,
1843 1843 # use the fast path, no race possible on push
1844 1844 cg = self._changegroup(outgoing.missing, 'push')
1845 1845 else:
1846 1846 cg = self.getlocalbundle('push', outgoing)
1847 1847
1848 1848 # apply changegroup to remote
1849 1849 if unbundle:
1850 1850 # local repo finds heads on server, finds out what
1851 1851 # revs it must push. once revs transferred, if server
1852 1852 # finds it has different heads (someone else won
1853 1853 # commit/push race), server aborts.
1854 1854 if force:
1855 1855 remoteheads = ['force']
1856 1856 # ssh: return remote's addchangegroup()
1857 1857 # http: return remote's addchangegroup() or 0 for error
1858 1858 ret = remote.unbundle(cg, remoteheads, 'push')
1859 1859 else:
1860 1860 # we return an integer indicating remote head count
1861 1861 # change
1862 1862 ret = remote.addchangegroup(cg, 'push', self.url())
1863 1863
1864 1864 if ret:
1865 1865 # push succeeded, synchronize target of the push
1866 1866 cheads = outgoing.missingheads
1867 1867 elif revs is None:
1868 1868 # All-out push failed; synchronize all common
1869 1869 cheads = outgoing.commonheads
1870 1870 else:
1871 1871 # I want cheads = heads(::missingheads and ::commonheads)
1872 1872 # (missingheads is revs with secret changeset filtered out)
1873 1873 #
1874 1874 # This can be expressed as:
1875 1875 # cheads = ( (missingheads and ::commonheads)
1876 1876 # + (commonheads and ::missingheads)
1877 1877 # )
1878 1878 #
1879 1879 # while trying to push we already computed the following:
1880 1880 # common = (::commonheads)
1881 1881 # missing = ((commonheads::missingheads) - commonheads)
1882 1882 #
1883 1883 # We can pick:
1884 1884 # * missingheads part of common (::commonheads)
1885 1885 common = set(outgoing.common)
1886 1886 cheads = [node for node in revs if node in common]
1887 1887 # and
1888 1888 # * commonheads that are parents of roots(missing)
1889 1889 revset = unfi.set('%ln and parents(roots(%ln))',
1890 1890 outgoing.commonheads,
1891 1891 outgoing.missing)
1892 1892 cheads.extend(c.node() for c in revset)
1893 1893 # even when we don't push, exchanging phase data is useful
1894 1894 remotephases = remote.listkeys('phases')
1895 1895 if not remotephases: # old server or public only repo
1896 1896 phases.advanceboundary(self, phases.public, cheads)
1897 1897 # don't push any phase data as there is nothing to push
1898 1898 else:
1899 1899 ana = phases.analyzeremotephases(self, cheads, remotephases)
1900 1900 pheads, droots = ana
1901 1901 ### Apply remote phase on local
1902 1902 if remotephases.get('publishing', False):
1903 1903 phases.advanceboundary(self, phases.public, cheads)
1904 1904 else: # publish = False
1905 1905 phases.advanceboundary(self, phases.public, pheads)
1906 1906 phases.advanceboundary(self, phases.draft, cheads)
1907 1907 ### Apply local phase on remote
1908 1908
1909 1909 # Get the list of all revs that are draft on remote but public here.
1910 1910 # XXX Beware that the revset breaks if droots is not strictly
1911 1911 # XXX roots; we may want to ensure it is, but that is costly
1912 1912 outdated = unfi.set('heads((%ln::%ln) and public())',
1913 1913 droots, cheads)
1914 1914 for newremotehead in outdated:
1915 1915 r = remote.pushkey('phases',
1916 1916 newremotehead.hex(),
1917 1917 str(phases.draft),
1918 1918 str(phases.public))
1919 1919 if not r:
1920 1920 self.ui.warn(_('updating %s to public failed!\n')
1921 1921 % newremotehead)
1922 1922 self.ui.debug('try to push obsolete markers to remote\n')
1923 1923 if (obsolete._enabled and self.obsstore and
1924 1924 'obsolete' in remote.listkeys('namespaces')):
1925 1925 rslts = []
1926 1926 remotedata = self.listkeys('obsolete')
1927 1927 for key in sorted(remotedata, reverse=True):
1928 1928 # reverse sort to ensure we end with dump0
1929 1929 data = remotedata[key]
1930 1930 rslts.append(remote.pushkey('obsolete', key, '', data))
1931 1931 if [r for r in rslts if not r]:
1932 1932 msg = _('failed to push some obsolete markers!\n')
1933 1933 self.ui.warn(msg)
1934 1934 finally:
1935 1935 if lock is not None:
1936 1936 lock.release()
1937 1937 finally:
1938 1938 locallock.release()
1939 1939
1940 1940 self.ui.debug("checking for updated bookmarks\n")
1941 1941 rb = remote.listkeys('bookmarks')
1942 1942 for k in rb.keys():
1943 1943 if k in unfi._bookmarks:
1944 1944 nr, nl = rb[k], hex(self._bookmarks[k])
1945 1945 if nr in unfi:
1946 1946 cr = unfi[nr]
1947 1947 cl = unfi[nl]
1948 1948 if bookmarks.validdest(unfi, cr, cl):
1949 1949 r = remote.pushkey('bookmarks', k, nr, nl)
1950 1950 if r:
1951 1951 self.ui.status(_("updating bookmark %s\n") % k)
1952 1952 else:
1953 1953 self.ui.warn(_('updating bookmark %s'
1954 1954 ' failed!\n') % k)
1955 1955
1956 1956 return ret
1957 1957
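# Interpreting push()'s return value (illustrative sketch, not from
# this file), per the docstring above:
#
#   ret = repo.push(remote)
#   if ret is None:
#       repo.ui.status("nothing to push\n")
#   elif ret == 0:
#       repo.ui.warn("push failed (HTTP error)\n")
#   # any other value: see addchangegroup() below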
1958 1958 def changegroupinfo(self, nodes, source):
1959 1959 if self.ui.verbose or source == 'bundle':
1960 1960 self.ui.status(_("%d changesets found\n") % len(nodes))
1961 1961 if self.ui.debugflag:
1962 1962 self.ui.debug("list of changesets:\n")
1963 1963 for node in nodes:
1964 1964 self.ui.debug("%s\n" % hex(node))
1965 1965
1966 1966 def changegroupsubset(self, bases, heads, source):
1967 1967 """Compute a changegroup consisting of all the nodes that are
1968 1968 descendants of any of the bases and ancestors of any of the heads.
1969 1969 Return a chunkbuffer object whose read() method will return
1970 1970 successive changegroup chunks.
1971 1971
1972 1972 It is fairly complex as determining which filenodes and which
1973 1973 manifest nodes need to be included for the changeset to be complete
1974 1974 is non-trivial.
1975 1975
1976 1976 Another wrinkle is doing the reverse, figuring out which changeset in
1977 1977 the changegroup a particular filenode or manifestnode belongs to.
1978 1978 """
1979 1979 cl = self.changelog
1980 1980 if not bases:
1981 1981 bases = [nullid]
1982 1982 csets, bases, heads = cl.nodesbetween(bases, heads)
1983 1983 # We assume that all ancestors of bases are known
1984 1984 common = cl.ancestors([cl.rev(n) for n in bases])
1985 1985 return self._changegroupsubset(common, csets, heads, source)
1986 1986
1987 1987 def getlocalbundle(self, source, outgoing):
1988 1988 """Like getbundle, but taking a discovery.outgoing as an argument.
1989 1989
1990 1990 This is only implemented for local repos and reuses potentially
1991 1991 precomputed sets in outgoing."""
1992 1992 if not outgoing.missing:
1993 1993 return None
1994 1994 return self._changegroupsubset(outgoing.common,
1995 1995 outgoing.missing,
1996 1996 outgoing.missingheads,
1997 1997 source)
1998 1998
1999 1999 def getbundle(self, source, heads=None, common=None):
2000 2000 """Like changegroupsubset, but returns the set difference between the
2001 2001 ancestors of heads and the ancestors common.
2002 2002
2003 2003 If heads is None, use the local heads. If common is None, use [nullid].
2004 2004
2005 2005 The nodes in common might not all be known locally due to the way the
2006 2006 current discovery protocol works.
2007 2007 """
2008 2008 cl = self.changelog
2009 2009 if common:
2010 2010 hasnode = cl.hasnode
2011 2011 common = [n for n in common if hasnode(n)]
2012 2012 else:
2013 2013 common = [nullid]
2014 2014 if not heads:
2015 2015 heads = cl.heads()
2016 2016 return self.getlocalbundle(source,
2017 2017 discovery.outgoing(cl, common, heads))
2018 2018
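# Illustrative sketch (not from this file): bundling everything a
# remote is missing, given nodes from discovery; getbundle() returns a
# chunkbuffer, or None when there is nothing to send:
#
#   cg = repo.getbundle('push', heads=remoteheads, common=commonnodes)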
2019 2019 @unfilteredmethod
2020 2020 def _changegroupsubset(self, commonrevs, csets, heads, source):
2021 2021
2022 2022 cl = self.changelog
2023 2023 mf = self.manifest
2024 2024 mfs = {} # needed manifests
2025 2025 fnodes = {} # needed file nodes
2026 2026 changedfiles = set()
2027 2027 fstate = ['', {}]
2028 2028 count = [0, 0]
2029 2029
2030 2030 # can we go through the fast path?
2031 2031 heads.sort()
2032 2032 if heads == sorted(self.heads()):
2033 2033 return self._changegroup(csets, source)
2034 2034
2035 2035 # slow path
2036 2036 self.hook('preoutgoing', throw=True, source=source)
2037 2037 self.changegroupinfo(csets, source)
2038 2038
2039 2039 # filter any nodes that claim to be part of the known set
2040 2040 def prune(revlog, missing):
2041 2041 rr, rl = revlog.rev, revlog.linkrev
2042 2042 return [n for n in missing
2043 2043 if rl(rr(n)) not in commonrevs]
2044 2044
2045 2045 progress = self.ui.progress
2046 2046 _bundling = _('bundling')
2047 2047 _changesets = _('changesets')
2048 2048 _manifests = _('manifests')
2049 2049 _files = _('files')
2050 2050
2051 2051 def lookup(revlog, x):
2052 2052 if revlog == cl:
2053 2053 c = cl.read(x)
2054 2054 changedfiles.update(c[3])
2055 2055 mfs.setdefault(c[0], x)
2056 2056 count[0] += 1
2057 2057 progress(_bundling, count[0],
2058 2058 unit=_changesets, total=count[1])
2059 2059 return x
2060 2060 elif revlog == mf:
2061 2061 clnode = mfs[x]
2062 2062 mdata = mf.readfast(x)
2063 2063 for f, n in mdata.iteritems():
2064 2064 if f in changedfiles:
2065 2065 fnodes[f].setdefault(n, clnode)
2066 2066 count[0] += 1
2067 2067 progress(_bundling, count[0],
2068 2068 unit=_manifests, total=count[1])
2069 2069 return clnode
2070 2070 else:
2071 2071 progress(_bundling, count[0], item=fstate[0],
2072 2072 unit=_files, total=count[1])
2073 2073 return fstate[1][x]
2074 2074
2075 2075 bundler = changegroup.bundle10(lookup)
2076 2076 reorder = self.ui.config('bundle', 'reorder', 'auto')
2077 2077 if reorder == 'auto':
2078 2078 reorder = None
2079 2079 else:
2080 2080 reorder = util.parsebool(reorder)
2081 2081
2082 2082 def gengroup():
2083 2083 # Create a changenode group generator that will call our functions
2084 2084 # back to look up the owning changenode and collect information.
2085 2085 count[:] = [0, len(csets)]
2086 2086 for chunk in cl.group(csets, bundler, reorder=reorder):
2087 2087 yield chunk
2088 2088 progress(_bundling, None)
2089 2089
2090 2090 # Create a generator for the manifestnodes that calls our lookup
2091 2091 # and data collection functions back.
2092 2092 for f in changedfiles:
2093 2093 fnodes[f] = {}
2094 2094 count[:] = [0, len(mfs)]
2095 2095 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2096 2096 yield chunk
2097 2097 progress(_bundling, None)
2098 2098
2099 2099 mfs.clear()
2100 2100
2101 2101 # Go through all our files in order sorted by name.
2102 2102 count[:] = [0, len(changedfiles)]
2103 2103 for fname in sorted(changedfiles):
2104 2104 filerevlog = self.file(fname)
2105 2105 if not len(filerevlog):
2106 2106 raise util.Abort(_("empty or missing revlog for %s")
2107 2107 % fname)
2108 2108 fstate[0] = fname
2109 2109 fstate[1] = fnodes.pop(fname, {})
2110 2110
2111 2111 nodelist = prune(filerevlog, fstate[1])
2112 2112 if nodelist:
2113 2113 count[0] += 1
2114 2114 yield bundler.fileheader(fname)
2115 2115 for chunk in filerevlog.group(nodelist, bundler, reorder):
2116 2116 yield chunk
2117 2117
2118 2118 # Signal that no more groups are left.
2119 2119 yield bundler.close()
2120 2120 progress(_bundling, None)
2121 2121
2122 2122 if csets:
2123 2123 self.hook('outgoing', node=hex(csets[0]), source=source)
2124 2124
2125 2125 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2126 2126
2127 2127 def changegroup(self, basenodes, source):
2128 2128 # to avoid a race we use changegroupsubset() (issue1320)
2129 2129 return self.changegroupsubset(basenodes, self.heads(), source)
2130 2130
2131 2131 @unfilteredmethod
2132 2132 def _changegroup(self, nodes, source):
2133 2133 """Compute the changegroup of all nodes that we have that a recipient
2134 2134 doesn't. Return a chunkbuffer object whose read() method will return
2135 2135 successive changegroup chunks.
2136 2136
2137 2137 This is much easier than the previous function as we can assume that
2138 2138 the recipient has any changenode we aren't sending them.
2139 2139
2140 2140 nodes is the set of nodes to send"""
2141 2141
2142 2142 cl = self.changelog
2143 2143 mf = self.manifest
2144 2144 mfs = {}
2145 2145 changedfiles = set()
2146 2146 fstate = ['']
2147 2147 count = [0, 0]
2148 2148
2149 2149 self.hook('preoutgoing', throw=True, source=source)
2150 2150 self.changegroupinfo(nodes, source)
2151 2151
2152 2152 revset = set([cl.rev(n) for n in nodes])
2153 2153
2154 2154 def gennodelst(log):
2155 2155 ln, llr = log.node, log.linkrev
2156 2156 return [ln(r) for r in log if llr(r) in revset]
2157 2157
2158 2158 progress = self.ui.progress
2159 2159 _bundling = _('bundling')
2160 2160 _changesets = _('changesets')
2161 2161 _manifests = _('manifests')
2162 2162 _files = _('files')
2163 2163
2164 2164 def lookup(revlog, x):
2165 2165 if revlog == cl:
2166 2166 c = cl.read(x)
2167 2167 changedfiles.update(c[3])
2168 2168 mfs.setdefault(c[0], x)
2169 2169 count[0] += 1
2170 2170 progress(_bundling, count[0],
2171 2171 unit=_changesets, total=count[1])
2172 2172 return x
2173 2173 elif revlog == mf:
2174 2174 count[0] += 1
2175 2175 progress(_bundling, count[0],
2176 2176 unit=_manifests, total=count[1])
2177 2177 return cl.node(revlog.linkrev(revlog.rev(x)))
2178 2178 else:
2179 2179 progress(_bundling, count[0], item=fstate[0],
2180 2180 total=count[1], unit=_files)
2181 2181 return cl.node(revlog.linkrev(revlog.rev(x)))
2182 2182
2183 2183 bundler = changegroup.bundle10(lookup)
2184 2184 reorder = self.ui.config('bundle', 'reorder', 'auto')
2185 2185 if reorder == 'auto':
2186 2186 reorder = None
2187 2187 else:
2188 2188 reorder = util.parsebool(reorder)
2189 2189
2190 2190 def gengroup():
2191 2191 '''yield a sequence of changegroup chunks (strings)'''
2192 2192 # construct a list of all changed files
2193 2193
2194 2194 count[:] = [0, len(nodes)]
2195 2195 for chunk in cl.group(nodes, bundler, reorder=reorder):
2196 2196 yield chunk
2197 2197 progress(_bundling, None)
2198 2198
2199 2199 count[:] = [0, len(mfs)]
2200 2200 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2201 2201 yield chunk
2202 2202 progress(_bundling, None)
2203 2203
2204 2204 count[:] = [0, len(changedfiles)]
2205 2205 for fname in sorted(changedfiles):
2206 2206 filerevlog = self.file(fname)
2207 2207 if not len(filerevlog):
2208 2208 raise util.Abort(_("empty or missing revlog for %s")
2209 2209 % fname)
2210 2210 fstate[0] = fname
2211 2211 nodelist = gennodelst(filerevlog)
2212 2212 if nodelist:
2213 2213 count[0] += 1
2214 2214 yield bundler.fileheader(fname)
2215 2215 for chunk in filerevlog.group(nodelist, bundler, reorder):
2216 2216 yield chunk
2217 2217 yield bundler.close()
2218 2218 progress(_bundling, None)
2219 2219
2220 2220 if nodes:
2221 2221 self.hook('outgoing', node=hex(nodes[0]), source=source)
2222 2222
2223 2223 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2224 2224
2225 2225 @unfilteredmethod
2226 2226 def addchangegroup(self, source, srctype, url, emptyok=False):
2227 2227 """Add the changegroup returned by source.read() to this repo.
2228 2228 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2229 2229 the URL of the repo where this changegroup is coming from.
2230 2230
2231 2231 Return an integer summarizing the change to this repo:
2232 2232 - nothing changed or no source: 0
2233 2233 - more heads than before: 1+added heads (2..n)
2234 2234 - fewer heads than before: -1-removed heads (-2..-n)
2235 2235 - number of heads stays the same: 1
2236 2236 """
2237 2237 def csmap(x):
2238 2238 self.ui.debug("add changeset %s\n" % short(x))
2239 2239 return len(cl)
2240 2240
2241 2241 def revmap(x):
2242 2242 return cl.rev(x)
2243 2243
2244 2244 if not source:
2245 2245 return 0
2246 2246
2247 2247 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2248 2248
2249 2249 changesets = files = revisions = 0
2250 2250 efiles = set()
2251 2251
2252 2252 # write changelog data to temp files so concurrent readers will not
2253 2253 # see an inconsistent view
2254 2254 cl = self.changelog
2255 2255 cl.delayupdate()
2256 2256 oldheads = cl.heads()
2257 2257
2258 2258 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2259 2259 try:
2260 2260 trp = weakref.proxy(tr)
2261 2261 # pull off the changeset group
2262 2262 self.ui.status(_("adding changesets\n"))
2263 2263 clstart = len(cl)
2264 2264 class prog(object):
2265 2265 step = _('changesets')
2266 2266 count = 1
2267 2267 ui = self.ui
2268 2268 total = None
2269 2269 def __call__(self):
2270 2270 self.ui.progress(self.step, self.count, unit=_('chunks'),
2271 2271 total=self.total)
2272 2272 self.count += 1
2273 2273 pr = prog()
2274 2274 source.callback = pr
2275 2275
2276 2276 source.changelogheader()
2277 2277 srccontent = cl.addgroup(source, csmap, trp)
2278 2278 if not (srccontent or emptyok):
2279 2279 raise util.Abort(_("received changelog group is empty"))
2280 2280 clend = len(cl)
2281 2281 changesets = clend - clstart
2282 2282 for c in xrange(clstart, clend):
2283 2283 efiles.update(self[c].files())
2284 2284 efiles = len(efiles)
2285 2285 self.ui.progress(_('changesets'), None)
2286 2286
2287 2287 # pull off the manifest group
2288 2288 self.ui.status(_("adding manifests\n"))
2289 2289 pr.step = _('manifests')
2290 2290 pr.count = 1
2291 2291 pr.total = changesets # manifests <= changesets
2292 2292 # no need to check for empty manifest group here:
2293 2293 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2294 2294 # no new manifest will be created and the manifest group will
2295 2295 # be empty during the pull
2296 2296 source.manifestheader()
2297 2297 self.manifest.addgroup(source, revmap, trp)
2298 2298 self.ui.progress(_('manifests'), None)
2299 2299
2300 2300 needfiles = {}
2301 2301 if self.ui.configbool('server', 'validate', default=False):
2302 2302 # validate incoming csets have their manifests
2303 2303 for cset in xrange(clstart, clend):
2304 2304 mfest = self.changelog.read(self.changelog.node(cset))[0]
2305 2305 mfest = self.manifest.readdelta(mfest)
2306 2306 # store file nodes we must see
2307 2307 for f, n in mfest.iteritems():
2308 2308 needfiles.setdefault(f, set()).add(n)
2309 2309
2310 2310 # process the files
2311 2311 self.ui.status(_("adding file changes\n"))
2312 2312 pr.step = _('files')
2313 2313 pr.count = 1
2314 2314 pr.total = efiles
2315 2315 source.callback = None
2316 2316
2317 2317 while True:
2318 2318 chunkdata = source.filelogheader()
2319 2319 if not chunkdata:
2320 2320 break
2321 2321 f = chunkdata["filename"]
2322 2322 self.ui.debug("adding %s revisions\n" % f)
2323 2323 pr()
2324 2324 fl = self.file(f)
2325 2325 o = len(fl)
2326 2326 if not fl.addgroup(source, revmap, trp):
2327 2327 raise util.Abort(_("received file revlog group is empty"))
2328 2328 revisions += len(fl) - o
2329 2329 files += 1
2330 2330 if f in needfiles:
2331 2331 needs = needfiles[f]
2332 2332 for new in xrange(o, len(fl)):
2333 2333 n = fl.node(new)
2334 2334 if n in needs:
2335 2335 needs.remove(n)
2336 2336 if not needs:
2337 2337 del needfiles[f]
2338 2338 self.ui.progress(_('files'), None)
2339 2339
2340 2340 for f, needs in needfiles.iteritems():
2341 2341 fl = self.file(f)
2342 2342 for n in needs:
2343 2343 try:
2344 2344 fl.rev(n)
2345 2345 except error.LookupError:
2346 2346 raise util.Abort(
2347 2347 _('missing file data for %s:%s - run hg verify') %
2348 2348 (f, hex(n)))
2349 2349
2350 2350 dh = 0
2351 2351 if oldheads:
2352 2352 heads = cl.heads()
2353 2353 dh = len(heads) - len(oldheads)
2354 2354 for h in heads:
2355 2355 if h not in oldheads and self[h].closesbranch():
2356 2356 dh -= 1
2357 2357 htext = ""
2358 2358 if dh:
2359 2359 htext = _(" (%+d heads)") % dh
2360 2360
2361 2361 self.ui.status(_("added %d changesets"
2362 2362 " with %d changes to %d files%s\n")
2363 2363 % (changesets, revisions, files, htext))
2364 2364 self.invalidatevolatilesets()
2365 2365
2366 2366 if changesets > 0:
2367 2367 p = lambda: cl.writepending() and self.root or ""
2368 2368 self.hook('pretxnchangegroup', throw=True,
2369 2369 node=hex(cl.node(clstart)), source=srctype,
2370 2370 url=url, pending=p)
2371 2371
2372 2372 added = [cl.node(r) for r in xrange(clstart, clend)]
2373 2373 publishing = self.ui.configbool('phases', 'publish', True)
2374 2374 if srctype == 'push':
2375 2375 # Old servers can not push the boundary themselves.
2376 2376 # New servers won't push the boundary if the changeset already
2377 2377 # existed locally as secret
2378 2378 #
2379 2379 # We should not use added here but the list of all changes in
2380 2380 # the bundle
2381 2381 if publishing:
2382 2382 phases.advanceboundary(self, phases.public, srccontent)
2383 2383 else:
2384 2384 phases.advanceboundary(self, phases.draft, srccontent)
2385 2385 phases.retractboundary(self, phases.draft, added)
2386 2386 elif srctype != 'strip':
2387 2387 # publishing only alters behavior during push
2388 2388 #
2389 2389 # strip should not touch boundary at all
2390 2390 phases.retractboundary(self, phases.draft, added)
2391 2391
2392 2392 # make changelog see real files again
2393 2393 cl.finalize(trp)
2394 2394
2395 2395 tr.close()
2396 2396
2397 2397 if changesets > 0:
2398 2398 branchmap.updatecache(self)
2399 2399 def runhooks():
2400 2400 # forcefully update the on-disk branch cache
2401 2401 self.ui.debug("updating the branch cache\n")
2402 2402 self.hook("changegroup", node=hex(cl.node(clstart)),
2403 2403 source=srctype, url=url)
2404 2404
2405 2405 for n in added:
2406 2406 self.hook("incoming", node=hex(n), source=srctype,
2407 2407 url=url)
2408 2408 self._afterlock(runhooks)
2409 2409
2410 2410 finally:
2411 2411 tr.release()
2412 2412 # never return 0 here:
2413 2413 if dh < 0:
2414 2414 return dh - 1
2415 2415 else:
2416 2416 return dh + 1
2417 2417
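# Worked example (illustrative) of the return value above: going from
# 2 open heads to 4 gives dh = 2 and a return of dh + 1 = 3; losing one
# head gives dh = -1 and a return of dh - 1 = -2; an unchanged head
# count returns 1. The result is never 0, so a 0 from pull() above
# always means no changes were found.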
2418 2418 def stream_in(self, remote, requirements):
2419 2419 lock = self.lock()
2420 2420 try:
2421 2421 # Save remote branchmap. We will use it later
2422 2422 # to speed up branchcache creation
2423 2423 rbranchmap = None
2424 2424 if remote.capable("branchmap"):
2425 2425 rbranchmap = remote.branchmap()
2426 2426
2427 2427 fp = remote.stream_out()
2428 2428 l = fp.readline()
2429 2429 try:
2430 2430 resp = int(l)
2431 2431 except ValueError:
2432 2432 raise error.ResponseError(
2433 2433 _('unexpected response from remote server:'), l)
2434 2434 if resp == 1:
2435 2435 raise util.Abort(_('operation forbidden by server'))
2436 2436 elif resp == 2:
2437 2437 raise util.Abort(_('locking the remote repository failed'))
2438 2438 elif resp != 0:
2439 2439 raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

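            # Seed the local branch cache with the branchmap saved from the
            # remote before streaming started, so it does not have to be
            # recomputed from the changelog on first access.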
            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    self._branchcache = cache
                    cache.write(self)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

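    # pushkey is the wire protocol's generic key/value publishing mechanism;
    # Mercurial uses its namespaces for things such as bookmarks and phases.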
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

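    # The message is written to .hg/last-message.txt so its text can be
    # recovered if the commit itself fails.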
    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

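# map a transaction journal file name to its undo file counterpart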
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True