branchmap: updating triggers a write...
Martijn Pieters
r41707:eb7ce452 default
diff --git a/mercurial/branchmap.py b/mercurial/branchmap.py
@@ -1,561 +1,570 @@
 # branchmap.py - logic to compute, maintain and store branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import struct
 
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
 )
 from . import (
     encoding,
     error,
     pycompat,
     scmutil,
     util,
 )
 from .utils import (
     stringutil,
 )
 
 calcsize = struct.calcsize
 pack_into = struct.pack_into
 unpack_from = struct.unpack_from
 
 
 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
 # This creates an ordering used for branchmap purposes.
 # The ordering may be partial.
 subsettable = {None: 'visible',
                'visible-hidden': 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}
 
 def updatecache(repo):
     cl = repo.changelog
     filtername = repo.filtername
     bcache = repo._branchcaches.get(filtername)
 
     revs = []
     if bcache is None or not bcache.validfor(repo):
         bcache = branchcache.fromfile(repo)
         if bcache is None:
             subsetname = subsettable.get(filtername)
             if subsetname is None:
                 bcache = branchcache()
             else:
                 subset = repo.filtered(subsetname)
                 bcache = subset.branchmap().copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
     revs.extend(cl.revs(start=bcache.tiprev + 1))
     if revs:
         bcache.update(repo, revs)
-        bcache.write(repo)
 
     assert bcache.validfor(repo), filtername
     repo._branchcaches[repo.filtername] = bcache
 
 def replacecache(repo, bm):
     """Replace the branchmap cache for a repo with a branch mapping.
 
     This is likely only called during clone with a branch map from a remote.
     """
     cl = repo.changelog
     clrev = cl.rev
     clbranchinfo = cl.branchinfo
     rbheads = []
     closed = []
     for bheads in bm.itervalues():
         rbheads.extend(bheads)
         for h in bheads:
             r = clrev(h)
             b, c = clbranchinfo(r)
             if c:
                 closed.append(h)
 
     if rbheads:
         rtiprev = max((int(clrev(node))
                        for node in rbheads))
         cache = branchcache(bm,
                             repo[rtiprev].node(),
                             rtiprev,
                             closednodes=closed)
 
         # Try to stick it as low as possible
         # filters above served are unlikely to be fetched from a clone
         for candidate in ('base', 'immutable', 'served'):
             rview = repo.filtered(candidate)
             if cache.validfor(rview):
                 repo._branchcaches[candidate] = cache
                 cache.write(rview)
                 break
 
 class branchcache(dict):
     """A dict-like object that holds the branch heads cache.
 
     This cache is used to avoid costly computations to determine all the
     branch heads of a repo.
 
     The cache is serialized on disk in the following format:
 
     <tip hex node> <tip rev number> [optional filtered repo hex hash]
     <branch head hex node> <open/closed state> <branch name>
     <branch head hex node> <open/closed state> <branch name>
     ...
 
     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
     included that hashes the hashes of all filtered revisions.
 
     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
     """
     @classmethod
     def fromfile(cls, repo):
         f = None
         try:
             f = repo.cachevfs(cls._filename(repo))
             lineiter = iter(f)
             cachekey = next(lineiter).rstrip('\n').split(" ", 2)
             last, lrev = cachekey[:2]
             last, lrev = bin(last), int(lrev)
             filteredhash = None
             if len(cachekey) > 2:
                 filteredhash = bin(cachekey[2])
             bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
             if not bcache.validfor(repo):
                 # invalidate the cache
                 raise ValueError(r'tip differs')
             cl = repo.changelog
             for line in lineiter:
                 line = line.rstrip('\n')
                 if not line:
                     continue
                 node, state, label = line.split(" ", 2)
                 if state not in 'oc':
                     raise ValueError(r'invalid branch state')
                 label = encoding.tolocal(label.strip())
                 node = bin(node)
                 if not cl.hasnode(node):
                     raise ValueError(
                         r'node %s does not exist' % pycompat.sysstr(hex(node)))
                 bcache.setdefault(label, []).append(node)
                 if state == 'c':
                     bcache._closednodes.add(node)
 
         except (IOError, OSError):
             return None
 
         except Exception as inst:
             if repo.ui.debugflag:
                 msg = 'invalid branchheads cache'
                 if repo.filtername is not None:
                     msg += ' (%s)' % repo.filtername
                 msg += ': %s\n'
                 repo.ui.debug(msg % pycompat.bytestr(inst))
             bcache = None
 
         finally:
             if f:
                 f.close()
 
         return bcache
 
     @staticmethod
     def _filename(repo):
         """name of a branchcache file for a given repo or repoview"""
         filename = "branch2"
         if repo.filtername:
             filename = '%s-%s' % (filename, repo.filtername)
         return filename
 
     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
         super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
         if closednodes is None:
             self._closednodes = set()
         else:
             self._closednodes = closednodes
 
     def validfor(self, repo):
         """Is the cache content valid regarding a repo
 
         - False when cached tipnode is unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
             return ((self.tipnode == repo.changelog.node(self.tiprev))
                     and (self.filteredhash ==
                          scmutil.filteredhash(repo, self.tiprev)))
         except IndexError:
             return False
 
     def _branchtip(self, heads):
         '''Return tuple with last open head in heads and false,
         otherwise return last closed head and true.'''
         tip = heads[-1]
         closed = True
         for h in reversed(heads):
             if h not in self._closednodes:
                 tip = h
                 closed = False
                 break
         return tip, closed
 
     def branchtip(self, branch):
         '''Return the tipmost open head on branch, otherwise return the
         tipmost closed head on branch.
         Raise KeyError for unknown branch.'''
         return self._branchtip(self[branch])[0]
 
     def iteropen(self, nodes):
         return (n for n in nodes if n not in self._closednodes)
 
     def branchheads(self, branch, closed=False):
         heads = self[branch]
         if not closed:
             heads = list(self.iteropen(heads))
         return heads
 
     def iterbranches(self):
         for bn, heads in self.iteritems():
             yield (bn, heads) + self._branchtip(heads)
 
     def copy(self):
         """return a deep copy of the branchcache object"""
-        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
-                           self._closednodes)
+        return type(self)(
+            self, self.tipnode, self.tiprev, self.filteredhash,
+            self._closednodes)
 
     def write(self, repo):
         try:
             f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
                         state = 'o'
                     f.write("%s %s %s\n" % (hex(node), state,
                                             encoding.fromlocal(label)))
             f.close()
             repo.ui.log('branchcache',
                         'wrote %s branch cache with %d labels and %d nodes\n',
                         repo.filtername, len(self), nodecount)
         except (IOError, OSError, error.Abort) as inst:
             # Abort may be raised by read only opener, so log and continue
             repo.ui.debug("couldn't write branch cache: %s\n" %
                           stringutil.forcebytestr(inst))
 
     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
         starttime = util.timer()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
         getbranchinfo = repo.revbranchcache().branchinfo
         for r in revgen:
             branch, closesbranch = getbranchinfo(r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))
 
         # fetch current topological heads to speed up filtering
         topoheads = set(cl.headrevs())
 
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in newbranches.iteritems():
             bheads = self.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)
 
             # This has been tested True on all internal usages of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
             bheadset.update(newheadrevs)
 
             # This prunes out two kinds of heads - heads that are superseded by
             # a head in newheadrevs, and newheadrevs that are not heads because
             # an existing head is their descendant.
             uncertain = bheadset - topoheads
             if uncertain:
                 floorrev = min(uncertain)
                 ancestors = set(cl.ancestors(newheadrevs, floorrev))
                 bheadset -= ancestors
             bheadrevs = sorted(bheadset)
             self[branch] = [cl.node(rev) for rev in bheadrevs]
             tiprev = bheadrevs[-1]
             if tiprev > self.tiprev:
                 self.tipnode = cl.node(tiprev)
                 self.tiprev = tiprev
 
         if not self.validfor(repo):
             # cache key is not valid anymore
             self.tipnode = nullid
             self.tiprev = nullrev
             for heads in self.values():
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
         self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
 
         duration = util.timer() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)
 
+        self.write(repo)
+
+
+class remotebranchcache(branchcache):
+    """Branchmap info for a remote connection, should not write locally"""
+    def write(self, repo):
+        pass
+
+
 # Revision branch info cache
 
 _rbcversion = '-v1'
 _rbcnames = 'rbc-names' + _rbcversion
 _rbcrevs = 'rbc-revs' + _rbcversion
 # [4 byte hash prefix][4 byte branch name number with high bit indicating close]
 _rbcrecfmt = '>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
 _rbcnodelen = 4
 _rbcbranchidxmask = 0x7fffffff
 _rbccloseflag = 0x80000000
 
 class revbranchcache(object):
     """Persistent cache, mapping from revision number to branch name and close.
     This is a low level cache, independent of filtering.
 
     Branch names are stored in rbc-names in internal encoding separated by 0.
     rbc-names is append-only, and each branch name is only stored once and will
     thus have a unique index.
 
     The branch info for each revision is stored in rbc-revs as constant size
     records. The whole file is read into memory, but it is only 'parsed' on
     demand. The file is usually append-only but will be truncated if repo
     modification is detected.
     The record for each revision contains the first 4 bytes of the
     corresponding node hash, and the record is only used if it still matches.
     Even a completely trashed rbc-revs file will thus still give the right
     result while converging towards full recovery ... assuming no incorrectly
     matching node hashes.
     The record also contains 4 bytes where 31 bits contain the index of the
     branch and the last bit indicates that it is a branch close commit.
     The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
     and will grow with it but be 1/8th of its size.
     """
 
     def __init__(self, repo, readonly=True):
         assert repo.filtername is None
         self._repo = repo
         self._names = []  # branch names in local encoding with static index
         self._rbcrevs = bytearray()
         self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
         try:
             bndata = repo.cachevfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata)  # for verification before writing
             if bndata:
                 self._names = [encoding.tolocal(bn)
                                for bn in bndata.split('\0')]
         except (IOError, OSError):
             if readonly:
                 # don't try to use cache - fall back to the slow path
                 self.branchinfo = self._branchinfo
 
         if self._names:
             try:
                 data = repo.cachevfs.read(_rbcrevs)
                 self._rbcrevs[:] = data
             except (IOError, OSError) as inst:
                 repo.ui.debug("couldn't read revision branch cache: %s\n" %
                               stringutil.forcebytestr(inst))
         # remember number of good records on disk
         self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                                len(repo.changelog))
         if self._rbcrevslen == 0:
             self._names = []
         self._rbcnamescount = len(self._names)  # number of names read at
                                                 # _rbcsnameslen
 
     def _clear(self):
         self._rbcsnameslen = 0
         del self._names[:]
         self._rbcnamescount = 0
         self._rbcrevslen = len(self._repo.changelog)
         self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
         util.clearcachedproperty(self, '_namesreverse')
 
     @util.propertycache
     def _namesreverse(self):
         return dict((b, r) for r, b in enumerate(self._names))
 
     def branchinfo(self, rev):
         """Return branch name and close flag for rev, using and updating
         persistent cache."""
         changelog = self._repo.changelog
         rbcrevidx = rev * _rbcrecsize
 
         # avoid negative index, changelog.read(nullrev) is fast without cache
         if rev == nullrev:
             return changelog.branchinfo(rev)
 
         # if requested rev isn't allocated, grow and cache the rev info
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             return self._branchinfo(rev)
 
         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
         cachenode, branchidx = unpack_from(
             _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
         if cachenode == '\0\0\0\0':
             pass
         elif cachenode == reponode:
             try:
                 return self._names[branchidx], close
             except IndexError:
                 # recover from invalid reference to unknown branch
                 self._repo.ui.debug("referenced branch names not found"
                     " - rebuilding revision branch cache from scratch\n")
                 self._clear()
         else:
             # rev/node map has changed, invalidate the cache from here up
             self._repo.ui.debug("history modification detected - truncating "
                                 "revision branch cache to revision %d\n" % rev)
             truncate = rbcrevidx + _rbcrecsize
             del self._rbcrevs[truncate:]
             self._rbcrevslen = min(self._rbcrevslen, truncate)
 
         # fall back to slow path and make sure it will be written to disk
         return self._branchinfo(rev)
 
     def _branchinfo(self, rev):
         """Retrieve branch info from changelog and update _rbcrevs"""
         changelog = self._repo.changelog
         b, close = changelog.branchinfo(rev)
         if b in self._namesreverse:
             branchidx = self._namesreverse[b]
         else:
             branchidx = len(self._names)
             self._names.append(b)
             self._namesreverse[b] = branchidx
         reponode = changelog.node(rev)
         if close:
             branchidx |= _rbccloseflag
         self._setcachedata(rev, reponode, branchidx)
         return b, close
 
     def setdata(self, branch, rev, node, close):
         """add new data information to the cache"""
         if branch in self._namesreverse:
             branchidx = self._namesreverse[branch]
         else:
             branchidx = len(self._names)
             self._names.append(branch)
             self._namesreverse[branch] = branchidx
         if close:
             branchidx |= _rbccloseflag
         self._setcachedata(rev, node, branchidx)
         # If no cache data was readable (file does not exist, bad permissions,
         # etc.), the cache was bypassing itself by setting:
         #
         #   self.branchinfo = self._branchinfo
         #
         # Since we now have data in the cache, we need to drop this bypassing.
         if r'branchinfo' in vars(self):
             del self.branchinfo
 
     def _setcachedata(self, rev, node, branchidx):
         """Writes the node's branch data to the in-memory cache data."""
         if rev == nullrev:
             return
         rbcrevidx = rev * _rbcrecsize
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             self._rbcrevs.extend('\0' *
                                  (len(self._repo.changelog) * _rbcrecsize -
                                   len(self._rbcrevs)))
         pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)
 
         tr = self._repo.currenttransaction()
         if tr:
             tr.addfinalize('write-revbranchcache', self.write)
 
     def write(self, tr=None):
         """Save branch cache if it is dirty."""
         repo = self._repo
         wlock = None
         step = ''
         try:
             if self._rbcnamescount < len(self._names):
                 step = ' names'
                 wlock = repo.wlock(wait=False)
                 if self._rbcnamescount != 0:
                     f = repo.cachevfs.open(_rbcnames, 'ab')
                     if f.tell() == self._rbcsnameslen:
                         f.write('\0')
                     else:
                         f.close()
                         repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                         self._rbcnamescount = 0
                         self._rbcrevslen = 0
                 if self._rbcnamescount == 0:
                     # before rewriting names, make sure references are removed
                     repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                     f = repo.cachevfs.open(_rbcnames, 'wb')
                 f.write('\0'.join(encoding.fromlocal(b)
                                   for b in self._names[self._rbcnamescount:]))
                 self._rbcsnameslen = f.tell()
                 f.close()
                 self._rbcnamescount = len(self._names)
 
             start = self._rbcrevslen * _rbcrecsize
             if start != len(self._rbcrevs):
                 step = ''
                 if wlock is None:
                     wlock = repo.wlock(wait=False)
                 revs = min(len(repo.changelog),
                            len(self._rbcrevs) // _rbcrecsize)
                 f = repo.cachevfs.open(_rbcrevs, 'ab')
                 if f.tell() != start:
                     repo.ui.debug("truncating cache/%s to %d\n"
                                   % (_rbcrevs, start))
                     f.seek(start)
                     if f.tell() != start:
                         start = 0
                         f.seek(start)
                     f.truncate()
                 end = revs * _rbcrecsize
                 f.write(self._rbcrevs[start:end])
                 f.close()
                 self._rbcrevslen = revs
         except (IOError, OSError, error.Abort, error.LockError) as inst:
             repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                           % (step, stringutil.forcebytestr(inst)))
         finally:
             if wlock is not None:
                 wlock.release()
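
A note on the nearest-subset table at the top of branchmap.py: updatecache() seeds a filtered view's cache from the view's nearest subset, following subsettable until a reusable cache or the unfiltered 'base' level is reached. A minimal sketch of how the chain resolves; subsetchain is a hypothetical helper, not part of the module:

    subsettable = {None: 'visible',
                   'visible-hidden': 'visible',
                   'visible': 'served',
                   'served': 'immutable',
                   'immutable': 'base'}

    def subsetchain(filtername):
        # Walk the fallback filters updatecache() may seed a cache from.
        while filtername in subsettable:
            filtername = subsettable[filtername]
            yield filtername

    assert list(subsetchain(None)) == ['visible', 'served', 'immutable', 'base']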
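
The on-disk format documented in the branchcache docstring is line-oriented and simple to parse by hand. A minimal standalone sketch mirroring the parsing done in fromfile(); the sample data is hypothetical, and real files carry 40-character hex node ids:

    def parse_branch2(data):
        # First line: '<tip hex node> <tip rev> [filtered hash]'; the rest:
        # '<head hex node> <o|c> <branch name>'.
        lines = iter(data.splitlines())
        cachekey = next(lines).split(" ", 2)
        tipnode, tiprev = cachekey[0], int(cachekey[1])
        filteredhash = cachekey[2] if len(cachekey) > 2 else None
        heads, closed = {}, set()
        for line in lines:
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            heads.setdefault(label, []).append(node)
            if state == 'c':
                closed.add(node)
        return tipnode, tiprev, filteredhash, heads, closed

    # Hypothetical sample (shortened node ids for readability):
    heads = parse_branch2("aa11 12 ff00\n"
                          "aa11 o default\n"
                          "bb22 c stable\n")[3]
    assert heads == {'default': ['aa11'], 'stable': ['bb22']}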
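
The tip-selection rule in _branchtip() is worth spelling out: scan the heads from newest to oldest and prefer the first open one; only when every head is closed does the newest (closed) head win. An equivalent standalone sketch:

    def branchtip(heads, closednodes):
        # heads is ordered oldest to newest, as in the branchcache values.
        for h in reversed(heads):
            if h not in closednodes:
                return h, False   # newest open head; branch is open
        return heads[-1], True    # all closed: newest head; branch is closed

    assert branchtip(['n1', 'n2'], {'n2'}) == ('n1', False)
    assert branchtip(['n1', 'n2'], {'n1', 'n2'}) == ('n2', True)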
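
The rbc-revs record layout (_rbcrecfmt = '>4sI') packs a 4-byte node-hash prefix and a 4-byte field whose low 31 bits index rbc-names and whose high bit flags a branch-closing commit. A small round-trip sketch; the sample prefix is arbitrary:

    import struct

    _rbcrecfmt = '>4sI'
    _rbccloseflag = 0x80000000
    _rbcbranchidxmask = 0x7fffffff

    # 8 bytes per revision, versus 64 bytes per 00changelog.i index entry,
    # which is where the "1/8th of its size" figure in the docstring comes from.
    assert struct.calcsize(_rbcrecfmt) == 8

    record = struct.pack(_rbcrecfmt, b'\xde\xad\xbe\xef', 5 | _rbccloseflag)
    prefix, field = struct.unpack(_rbcrecfmt, record)
    close = bool(field & _rbccloseflag)
    branchidx = field & _rbcbranchidxmask
    assert (prefix, branchidx, close) == (b'\xde\xad\xbe\xef', 5, True)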
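
The pruning step in branchcache.update() avoids recomputing heads from scratch: candidate heads that are already topological heads are certainly still heads, and the remaining candidates are dropped if a single ancestors() walk from the new revs reaches them. A sketch under the assumption that ancestors(revs, floor) behaves like changelog.ancestors, yielding proper ancestors of revs bounded below by floor:

    def prune_heads(oldheadrevs, newrevs, topoheads, ancestors):
        # Merge old heads with the new candidate revs, then drop every rev
        # that is a proper ancestor of one of the new revs.
        bheadset = set(oldheadrevs) | set(newrevs)
        uncertain = bheadset - topoheads
        if uncertain:
            floorrev = min(uncertain)
            bheadset -= set(ancestors(newrevs, floorrev))
        return sorted(bheadset)

    # Toy linear history 0 -> 1 -> 2: old head 1 is superseded by new rev 2.
    toyancestors = lambda revs, floor: [r for r in range(min(revs)) if r >= floor]
    assert prune_heads([1], [2], {2}, toyancestors) == [2]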
@@ -1,533 +1,533 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
31 """Return a tuple (common, anyincoming, heads) used to identify the common
31 """Return a tuple (common, anyincoming, heads) used to identify the common
32 subset of nodes between repo and remote.
32 subset of nodes between repo and remote.
33
33
34 "common" is a list of (at least) the heads of the common subset.
34 "common" is a list of (at least) the heads of the common subset.
35 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 "anyincoming" is testable as a boolean indicating if any nodes are missing
36 locally. If remote does not support getbundle, this actually is a list of
36 locally. If remote does not support getbundle, this actually is a list of
37 roots of the nodes that would be incoming, to be supplied to
37 roots of the nodes that would be incoming, to be supplied to
38 changegroupsubset. No code except for pull should be relying on this fact
38 changegroupsubset. No code except for pull should be relying on this fact
39 any longer.
39 any longer.
40 "heads" is either the supplied heads, or else the remote's heads.
40 "heads" is either the supplied heads, or else the remote's heads.
41 "ancestorsof" if not None, restrict the discovery to a subset defined by
41 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 these nodes. Changeset outside of this set won't be considered (and
42 these nodes. Changeset outside of this set won't be considered (and
43 won't appears in "common")
43 won't appears in "common")
44
44
45 If you pass heads and they are all known locally, the response lists just
45 If you pass heads and they are all known locally, the response lists just
46 these heads in "common" and in "heads".
46 these heads in "common" and in "heads".
47
47
48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
49 extensions a good hook into outgoing.
49 extensions a good hook into outgoing.
50 """
50 """
51
51
52 if not remote.capable('getbundle'):
52 if not remote.capable('getbundle'):
53 return treediscovery.findcommonincoming(repo, remote, heads, force)
53 return treediscovery.findcommonincoming(repo, remote, heads, force)
54
54
55 if heads:
55 if heads:
56 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
56 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
57 if all(knownnode(h) for h in heads):
57 if all(knownnode(h) for h in heads):
58 return (heads, False, heads)
58 return (heads, False, heads)
59
59
60 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
60 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
61 abortwhenunrelated=not force,
61 abortwhenunrelated=not force,
62 ancestorsof=ancestorsof)
62 ancestorsof=ancestorsof)
63 common, anyinc, srvheads = res
63 common, anyinc, srvheads = res
64 return (list(common), anyinc, heads or list(srvheads))
64 return (list(common), anyinc, heads or list(srvheads))
65
65
66 class outgoing(object):
66 class outgoing(object):
67 '''Represents the set of nodes present in a local repo but not in a
67 '''Represents the set of nodes present in a local repo but not in a
68 (possibly) remote one.
68 (possibly) remote one.
69
69
70 Members:
70 Members:
71
71
72 missing is a list of all nodes present in local but not in remote.
72 missing is a list of all nodes present in local but not in remote.
73 common is a list of all nodes shared between the two repos.
73 common is a list of all nodes shared between the two repos.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
74 excluded is the list of missing changeset that shouldn't be sent remotely.
75 missingheads is the list of heads of missing.
75 missingheads is the list of heads of missing.
76 commonheads is the list of heads of common.
76 commonheads is the list of heads of common.
77
77
78 The sets are computed on demand from the heads, unless provided upfront
78 The sets are computed on demand from the heads, unless provided upfront
79 by discovery.'''
79 by discovery.'''
80
80
81 def __init__(self, repo, commonheads=None, missingheads=None,
81 def __init__(self, repo, commonheads=None, missingheads=None,
82 missingroots=None):
82 missingroots=None):
83 # at least one of them must not be set
83 # at least one of them must not be set
84 assert None in (commonheads, missingroots)
84 assert None in (commonheads, missingroots)
85 cl = repo.changelog
85 cl = repo.changelog
86 if missingheads is None:
86 if missingheads is None:
87 missingheads = cl.heads()
87 missingheads = cl.heads()
88 if missingroots:
88 if missingroots:
89 discbases = []
89 discbases = []
90 for n in missingroots:
90 for n in missingroots:
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
91 discbases.extend([p for p in cl.parents(n) if p != nullid])
92 # TODO remove call to nodesbetween.
92 # TODO remove call to nodesbetween.
93 # TODO populate attributes on outgoing instance instead of setting
93 # TODO populate attributes on outgoing instance instead of setting
94 # discbases.
94 # discbases.
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
95 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
96 included = set(csets)
96 included = set(csets)
97 missingheads = heads
97 missingheads = heads
98 commonheads = [n for n in discbases if n not in included]
98 commonheads = [n for n in discbases if n not in included]
99 elif not commonheads:
99 elif not commonheads:
100 commonheads = [nullid]
100 commonheads = [nullid]
101 self.commonheads = commonheads
101 self.commonheads = commonheads
102 self.missingheads = missingheads
102 self.missingheads = missingheads
103 self._revlog = cl
103 self._revlog = cl
104 self._common = None
104 self._common = None
105 self._missing = None
105 self._missing = None
106 self.excluded = []
106 self.excluded = []
107
107
108 def _computecommonmissing(self):
108 def _computecommonmissing(self):
109 sets = self._revlog.findcommonmissing(self.commonheads,
109 sets = self._revlog.findcommonmissing(self.commonheads,
110 self.missingheads)
110 self.missingheads)
111 self._common, self._missing = sets
111 self._common, self._missing = sets
112
112
113 @util.propertycache
113 @util.propertycache
114 def common(self):
114 def common(self):
115 if self._common is None:
115 if self._common is None:
116 self._computecommonmissing()
116 self._computecommonmissing()
117 return self._common
117 return self._common
118
118
119 @util.propertycache
119 @util.propertycache
120 def missing(self):
120 def missing(self):
121 if self._missing is None:
121 if self._missing is None:
122 self._computecommonmissing()
122 self._computecommonmissing()
123 return self._missing
123 return self._missing
124
124
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
125 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
126 commoninc=None, portable=False):
126 commoninc=None, portable=False):
127 '''Return an outgoing instance to identify the nodes present in repo but
127 '''Return an outgoing instance to identify the nodes present in repo but
128 not in other.
128 not in other.
129
129
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
130 If onlyheads is given, only nodes ancestral to nodes in onlyheads
131 (inclusive) are included. If you already know the local repo's heads,
131 (inclusive) are included. If you already know the local repo's heads,
132 passing them in onlyheads is faster than letting them be recomputed here.
132 passing them in onlyheads is faster than letting them be recomputed here.
133
133
134 If commoninc is given, it must be the result of a prior call to
134 If commoninc is given, it must be the result of a prior call to
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
135 findcommonincoming(repo, other, force) to avoid recomputing it here.
136
136
137 If portable is given, compute more conservative common and missingheads,
137 If portable is given, compute more conservative common and missingheads,
138 to make bundles created from the instance more portable.'''
138 to make bundles created from the instance more portable.'''
139 # declare an empty outgoing object to be filled later
139 # declare an empty outgoing object to be filled later
140 og = outgoing(repo, None, None)
140 og = outgoing(repo, None, None)
141
141
142 # get common set if not provided
142 # get common set if not provided
143 if commoninc is None:
143 if commoninc is None:
144 commoninc = findcommonincoming(repo, other, force=force,
144 commoninc = findcommonincoming(repo, other, force=force,
145 ancestorsof=onlyheads)
145 ancestorsof=onlyheads)
146 og.commonheads, _any, _hds = commoninc
146 og.commonheads, _any, _hds = commoninc
147
147
148 # compute outgoing
148 # compute outgoing
149 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
149 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
150 if not mayexclude:
150 if not mayexclude:
151 og.missingheads = onlyheads or repo.heads()
151 og.missingheads = onlyheads or repo.heads()
152 elif onlyheads is None:
152 elif onlyheads is None:
153 # use visible heads as it should be cached
153 # use visible heads as it should be cached
154 og.missingheads = repo.filtered("served").heads()
154 og.missingheads = repo.filtered("served").heads()
155 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
155 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
156 else:
156 else:
157 # compute common, missing and exclude secret stuff
157 # compute common, missing and exclude secret stuff
158 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
158 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
159 og._common, allmissing = sets
159 og._common, allmissing = sets
160 og._missing = missing = []
160 og._missing = missing = []
161 og.excluded = excluded = []
161 og.excluded = excluded = []
162 for node in allmissing:
162 for node in allmissing:
163 ctx = repo[node]
163 ctx = repo[node]
164 if ctx.phase() >= phases.secret or ctx.extinct():
164 if ctx.phase() >= phases.secret or ctx.extinct():
165 excluded.append(node)
165 excluded.append(node)
166 else:
166 else:
167 missing.append(node)
167 missing.append(node)
168 if len(missing) == len(allmissing):
168 if len(missing) == len(allmissing):
169 missingheads = onlyheads
169 missingheads = onlyheads
170 else: # update missing heads
170 else: # update missing heads
171 missingheads = phases.newheads(repo, onlyheads, excluded)
171 missingheads = phases.newheads(repo, onlyheads, excluded)
172 og.missingheads = missingheads
172 og.missingheads = missingheads
173 if portable:
173 if portable:
174 # recompute common and missingheads as if -r<rev> had been given for
174 # recompute common and missingheads as if -r<rev> had been given for
175 # each head of missing, and --base <rev> for each head of the proper
175 # each head of missing, and --base <rev> for each head of the proper
176 # ancestors of missing
176 # ancestors of missing
177 og._computecommonmissing()
177 og._computecommonmissing()
178 cl = repo.changelog
178 cl = repo.changelog
179 missingrevs = set(cl.rev(n) for n in og._missing)
179 missingrevs = set(cl.rev(n) for n in og._missing)
180 og._common = set(cl.ancestors(missingrevs)) - missingrevs
180 og._common = set(cl.ancestors(missingrevs)) - missingrevs
181 commonheads = set(og.commonheads)
181 commonheads = set(og.commonheads)
182 og.missingheads = [h for h in og.missingheads if h not in commonheads]
182 og.missingheads = [h for h in og.missingheads if h not in commonheads]
183
183
184 return og
184 return og
185
185
186 def _headssummary(pushop):
186 def _headssummary(pushop):
187 """compute a summary of branch and heads status before and after push
187 """compute a summary of branch and heads status before and after push
188
188
189 return {'branch': ([remoteheads], [newheads],
189 return {'branch': ([remoteheads], [newheads],
190 [unsyncedheads], [discardedheads])} mapping
190 [unsyncedheads], [discardedheads])} mapping
191
191
192 - branch: the branch name,
192 - branch: the branch name,
193 - remoteheads: the list of remote heads known locally
193 - remoteheads: the list of remote heads known locally
194 None if the branch is new,
194 None if the branch is new,
195 - newheads: the new remote heads (known locally) with outgoing pushed,
195 - newheads: the new remote heads (known locally) with outgoing pushed,
196 - unsyncedheads: the list of remote heads unknown locally,
196 - unsyncedheads: the list of remote heads unknown locally,
197 - discardedheads: the list of heads made obsolete by the push.
197 - discardedheads: the list of heads made obsolete by the push.
198 """
198 """
199 repo = pushop.repo.unfiltered()
199 repo = pushop.repo.unfiltered()
200 remote = pushop.remote
200 remote = pushop.remote
201 outgoing = pushop.outgoing
201 outgoing = pushop.outgoing
202 cl = repo.changelog
202 cl = repo.changelog
203 headssum = {}
203 headssum = {}
204 # A. Create set of branches involved in the push.
204 # A. Create set of branches involved in the push.
205 branches = set(repo[n].branch() for n in outgoing.missing)
205 branches = set(repo[n].branch() for n in outgoing.missing)
206
206
207 with remote.commandexecutor() as e:
207 with remote.commandexecutor() as e:
208 remotemap = e.callcommand('branchmap', {}).result()
208 remotemap = e.callcommand('branchmap', {}).result()
209
209
210 newbranches = branches - set(remotemap)
210 newbranches = branches - set(remotemap)
211 branches.difference_update(newbranches)
211 branches.difference_update(newbranches)
212
212
213 # A. register remote heads
213 # A. register remote heads
214 remotebranches = set()
214 remotebranches = set()
215 for branch, heads in remotemap.iteritems():
215 for branch, heads in remotemap.iteritems():
216 remotebranches.add(branch)
216 remotebranches.add(branch)
217 known = []
217 known = []
218 unsynced = []
218 unsynced = []
219 knownnode = cl.hasnode # do not use nodemap until it is filtered
219 knownnode = cl.hasnode # do not use nodemap until it is filtered
220 for h in heads:
220 for h in heads:
221 if knownnode(h):
221 if knownnode(h):
222 known.append(h)
222 known.append(h)
223 else:
223 else:
224 unsynced.append(h)
224 unsynced.append(h)
225 headssum[branch] = (known, list(known), unsynced)
225 headssum[branch] = (known, list(known), unsynced)
226 # B. add new branch data
226 # B. add new branch data
227 missingctx = list(repo[n] for n in outgoing.missing)
227 missingctx = list(repo[n] for n in outgoing.missing)
228 touchedbranches = set()
228 touchedbranches = set()
229 for ctx in missingctx:
229 for ctx in missingctx:
230 branch = ctx.branch()
230 branch = ctx.branch()
231 touchedbranches.add(branch)
231 touchedbranches.add(branch)
232 if branch not in headssum:
232 if branch not in headssum:
233 headssum[branch] = (None, [], [])
233 headssum[branch] = (None, [], [])
234
234
235 # C drop data about untouched branches:
235 # C drop data about untouched branches:
236 for branch in remotebranches - touchedbranches:
236 for branch in remotebranches - touchedbranches:
237 del headssum[branch]
237 del headssum[branch]
238
238
239 # D. Update newmap with outgoing changes.
239 # D. Update newmap with outgoing changes.
240 # This will possibly add new heads and remove existing ones.
240 # This will possibly add new heads and remove existing ones.
241 newmap = branchmap.branchcache((branch, heads[1])
241 newmap = branchmap.remotebranchcache((branch, heads[1])
242 for branch, heads in headssum.iteritems()
242 for branch, heads in headssum.iteritems()
243 if heads[0] is not None)
243 if heads[0] is not None)
244 newmap.update(repo, (ctx.rev() for ctx in missingctx))
244 newmap.update(repo, (ctx.rev() for ctx in missingctx))
245 for branch, newheads in newmap.iteritems():
245 for branch, newheads in newmap.iteritems():
246 headssum[branch][1][:] = newheads
246 headssum[branch][1][:] = newheads
247 for branch, items in headssum.iteritems():
247 for branch, items in headssum.iteritems():
248 for l in items:
248 for l in items:
249 if l is not None:
249 if l is not None:
250 l.sort()
250 l.sort()
251 headssum[branch] = items + ([],)
251 headssum[branch] = items + ([],)
252
252
253 # If there are no obsstore, no post processing are needed.
253 # If there are no obsstore, no post processing are needed.
254 if repo.obsstore:
254 if repo.obsstore:
255 torev = repo.changelog.rev
255 torev = repo.changelog.rev
256 futureheads = set(torev(h) for h in outgoing.missingheads)
256 futureheads = set(torev(h) for h in outgoing.missingheads)
257 futureheads |= set(torev(h) for h in outgoing.commonheads)
257 futureheads |= set(torev(h) for h in outgoing.commonheads)
258 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
258 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
259 for branch, heads in sorted(headssum.iteritems()):
259 for branch, heads in sorted(headssum.iteritems()):
260 remoteheads, newheads, unsyncedheads, placeholder = heads
260 remoteheads, newheads, unsyncedheads, placeholder = heads
261 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
261 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
262 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
262 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
263 sorted(result[1]))
263 sorted(result[1]))
264 return headssum
264 return headssum
265
265
266 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
266 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
267 """Compute branchmapsummary for repo without branchmap support"""
267 """Compute branchmapsummary for repo without branchmap support"""
268
268
269 # 1-4b. old servers: Check for new topological heads.
269 # 1-4b. old servers: Check for new topological heads.
270 # Construct {old,new}map with branch = None (topological branch).
270 # Construct {old,new}map with branch = None (topological branch).
271 # (code based on update)
271 # (code based on update)
272 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
272 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
273 oldheads = sorted(h for h in remoteheads if knownnode(h))
273 oldheads = sorted(h for h in remoteheads if knownnode(h))
274 # all nodes in outgoing.missing are children of either:
274 # all nodes in outgoing.missing are children of either:
275 # - an element of oldheads
275 # - an element of oldheads
276 # - another element of outgoing.missing
276 # - another element of outgoing.missing
277 # - nullrev
277 # - nullrev
278 # This explains why the new heads are very simple to compute.
278 # This explains why the new heads are very simple to compute.
279 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
279 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
280 newheads = sorted(c.node() for c in r)
280 newheads = sorted(c.node() for c in r)
281 # add a placeholder unsynced head to trigger the "unsynced changes" warning
281 # add a placeholder unsynced head to trigger the "unsynced changes" warning
282 if inc:
282 if inc:
283 unsynced = [None]
283 unsynced = [None]
284 else:
284 else:
285 unsynced = []
285 unsynced = []
286 return {None: (oldheads, newheads, unsynced, [])}
286 return {None: (oldheads, newheads, unsynced, [])}
287
287
288 def _nowarnheads(pushop):
288 def _nowarnheads(pushop):
289 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
289 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
290 repo = pushop.repo.unfiltered()
290 repo = pushop.repo.unfiltered()
291 remote = pushop.remote
291 remote = pushop.remote
292 localbookmarks = repo._bookmarks
292 localbookmarks = repo._bookmarks
293
293
294 with remote.commandexecutor() as e:
294 with remote.commandexecutor() as e:
295 remotebookmarks = e.callcommand('listkeys', {
295 remotebookmarks = e.callcommand('listkeys', {
296 'namespace': 'bookmarks',
296 'namespace': 'bookmarks',
297 }).result()
297 }).result()
298
298
299 bookmarkedheads = set()
299 bookmarkedheads = set()
300
300
301 # internal config: bookmarks.pushing
301 # internal config: bookmarks.pushing
302 newbookmarks = [localbookmarks.expandname(b)
302 newbookmarks = [localbookmarks.expandname(b)
303 for b in pushop.ui.configlist('bookmarks', 'pushing')]
303 for b in pushop.ui.configlist('bookmarks', 'pushing')]
304
304
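304 # a local head needs no warning when it carries a bookmark that
304 # a local head needs no warning when it carries a bookmark that
304 # fast-forwards a bookmark known remotely, or a bookmark that is
304 # fast-forwards a bookmark known remotely, or a bookmark that is
304 # itself being pushed
304 # itself being pushed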
305 for bm in localbookmarks:
305 for bm in localbookmarks:
306 rnode = remotebookmarks.get(bm)
306 rnode = remotebookmarks.get(bm)
307 if rnode and rnode in repo:
307 if rnode and rnode in repo:
308 lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
308 lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
309 if bookmarks.validdest(repo, rctx, lctx):
309 if bookmarks.validdest(repo, rctx, lctx):
310 bookmarkedheads.add(lctx.node())
310 bookmarkedheads.add(lctx.node())
311 else:
311 else:
312 if bm in newbookmarks and bm not in remotebookmarks:
312 if bm in newbookmarks and bm not in remotebookmarks:
313 bookmarkedheads.add(localbookmarks[bm])
313 bookmarkedheads.add(localbookmarks[bm])
314
314
315 return bookmarkedheads
315 return bookmarkedheads
316
316
317 def checkheads(pushop):
317 def checkheads(pushop):
318 """Check that a push won't add any outgoing head
318 """Check that a push won't add any outgoing head
319
319
320 Raises an Abort error and displays a ui message as needed.
320 Raises an Abort error and displays a ui message as needed.
321 """
321 """
322
322
323 repo = pushop.repo.unfiltered()
323 repo = pushop.repo.unfiltered()
324 remote = pushop.remote
324 remote = pushop.remote
325 outgoing = pushop.outgoing
325 outgoing = pushop.outgoing
326 remoteheads = pushop.remoteheads
326 remoteheads = pushop.remoteheads
327 newbranch = pushop.newbranch
327 newbranch = pushop.newbranch
328 inc = bool(pushop.incoming)
328 inc = bool(pushop.incoming)
329
329
330 # Check for each named branch if we're creating new remote heads.
330 # Check for each named branch if we're creating new remote heads.
331 # To be a remote head after push, node must be either:
331 # To be a remote head after push, node must be either:
332 # - unknown locally
332 # - unknown locally
333 # - a local outgoing head descended from update
333 # - a local outgoing head descended from update
334 # - a remote head that's known locally and not
334 # - a remote head that's known locally and not
335 # ancestral to an outgoing head
335 # ancestral to an outgoing head
336 if remoteheads == [nullid]:
336 if remoteheads == [nullid]:
337 # remote is empty, nothing to check.
337 # remote is empty, nothing to check.
338 return
338 return
339
339
340 if remote.capable('branchmap'):
340 if remote.capable('branchmap'):
341 headssum = _headssummary(pushop)
341 headssum = _headssummary(pushop)
342 else:
342 else:
343 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
343 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
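344 # keep the computed summary on the push operation for later steps
344 # keep the computed summary on the push operation for later steps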
344 pushop.pushbranchmap = headssum
344 pushop.pushbranchmap = headssum
345 newbranches = [branch for branch, heads in headssum.iteritems()
345 newbranches = [branch for branch, heads in headssum.iteritems()
346 if heads[0] is None]
346 if heads[0] is None]
347 # 1. Check for new branches on the remote.
347 # 1. Check for new branches on the remote.
348 if newbranches and not newbranch: # new branch requires --new-branch
348 if newbranches and not newbranch: # new branch requires --new-branch
349 branchnames = ', '.join(sorted(newbranches))
349 branchnames = ', '.join(sorted(newbranches))
350 raise error.Abort(_("push creates new remote branches: %s!")
350 raise error.Abort(_("push creates new remote branches: %s!")
351 % branchnames,
351 % branchnames,
352 hint=_("use 'hg push --new-branch' to create"
352 hint=_("use 'hg push --new-branch' to create"
353 " new remote branches"))
353 " new remote branches"))
354
354
355 # 2. Find heads that we need not warn about
355 # 2. Find heads that we need not warn about
356 nowarnheads = _nowarnheads(pushop)
356 nowarnheads = _nowarnheads(pushop)
357
357
358 # 3. Check for new heads.
358 # 3. Check for new heads.
359 # If there are more heads after the push than before, a suitable
359 # If there are more heads after the push than before, a suitable
360 # error message, depending on unsynced status, is displayed.
360 # error message, depending on unsynced status, is displayed.
361 errormsg = None
361 errormsg = None
362 for branch, heads in sorted(headssum.iteritems()):
362 for branch, heads in sorted(headssum.iteritems()):
363 remoteheads, newheads, unsyncedheads, discardedheads = heads
363 remoteheads, newheads, unsyncedheads, discardedheads = heads
364 # add unsynced data
364 # add unsynced data
365 if remoteheads is None:
365 if remoteheads is None:
366 oldhs = set()
366 oldhs = set()
367 else:
367 else:
368 oldhs = set(remoteheads)
368 oldhs = set(remoteheads)
369 oldhs.update(unsyncedheads)
369 oldhs.update(unsyncedheads)
370 dhs = None # delta heads, the new heads on branch
370 dhs = None # delta heads, the new heads on branch
371 newhs = set(newheads)
371 newhs = set(newheads)
372 newhs.update(unsyncedheads)
372 newhs.update(unsyncedheads)
373 if unsyncedheads:
373 if unsyncedheads:
374 if None in unsyncedheads:
374 if None in unsyncedheads:
375 # old remote, no heads data
375 # old remote, no heads data
376 heads = None
376 heads = None
377 else:
377 else:
378 heads = scmutil.nodesummaries(repo, unsyncedheads)
378 heads = scmutil.nodesummaries(repo, unsyncedheads)
379 if heads is None:
379 if heads is None:
380 repo.ui.status(_("remote has heads that are "
380 repo.ui.status(_("remote has heads that are "
381 "not known locally\n"))
381 "not known locally\n"))
382 elif branch is None:
382 elif branch is None:
383 repo.ui.status(_("remote has heads that are "
383 repo.ui.status(_("remote has heads that are "
384 "not known locally: %s\n") % heads)
384 "not known locally: %s\n") % heads)
385 else:
385 else:
386 repo.ui.status(_("remote has heads on branch '%s' that are "
386 repo.ui.status(_("remote has heads on branch '%s' that are "
387 "not known locally: %s\n") % (branch, heads))
387 "not known locally: %s\n") % (branch, heads))
388 if remoteheads is None:
388 if remoteheads is None:
389 if len(newhs) > 1:
389 if len(newhs) > 1:
390 dhs = list(newhs)
390 dhs = list(newhs)
391 if errormsg is None:
391 if errormsg is None:
392 errormsg = (_("push creates new branch '%s' "
392 errormsg = (_("push creates new branch '%s' "
393 "with multiple heads") % (branch))
393 "with multiple heads") % (branch))
394 hint = _("merge or"
394 hint = _("merge or"
395 " see 'hg help push' for details about"
395 " see 'hg help push' for details about"
396 " pushing new heads")
396 " pushing new heads")
397 elif len(newhs) > len(oldhs):
397 elif len(newhs) > len(oldhs):
398 # remove bookmarked or existing remote heads from the new heads list
398 # remove bookmarked or existing remote heads from the new heads list
399 dhs = sorted(newhs - nowarnheads - oldhs)
399 dhs = sorted(newhs - nowarnheads - oldhs)
400 if dhs:
400 if dhs:
401 if errormsg is None:
401 if errormsg is None:
402 if branch not in ('default', None):
402 if branch not in ('default', None):
403 errormsg = _("push creates new remote head %s "
403 errormsg = _("push creates new remote head %s "
404 "on branch '%s'!") % (short(dhs[0]), branch)
404 "on branch '%s'!") % (short(dhs[0]), branch)
405 elif repo[dhs[0]].bookmarks():
405 elif repo[dhs[0]].bookmarks():
406 errormsg = _("push creates new remote head %s "
406 errormsg = _("push creates new remote head %s "
407 "with bookmark '%s'!") % (
407 "with bookmark '%s'!") % (
408 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
408 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
409 else:
409 else:
410 errormsg = _("push creates new remote head %s!"
410 errormsg = _("push creates new remote head %s!"
411 ) % short(dhs[0])
411 ) % short(dhs[0])
412 if unsyncedheads:
412 if unsyncedheads:
413 hint = _("pull and merge or"
413 hint = _("pull and merge or"
414 " see 'hg help push' for details about"
414 " see 'hg help push' for details about"
415 " pushing new heads")
415 " pushing new heads")
416 else:
416 else:
417 hint = _("merge or"
417 hint = _("merge or"
418 " see 'hg help push' for details about"
418 " see 'hg help push' for details about"
419 " pushing new heads")
419 " pushing new heads")
420 if branch is None:
420 if branch is None:
421 repo.ui.note(_("new remote heads:\n"))
421 repo.ui.note(_("new remote heads:\n"))
422 else:
422 else:
423 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
423 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
424 for h in dhs:
424 for h in dhs:
425 repo.ui.note((" %s\n") % short(h))
425 repo.ui.note((" %s\n") % short(h))
426 if errormsg:
426 if errormsg:
427 raise error.Abort(errormsg, hint=hint)
427 raise error.Abort(errormsg, hint=hint)
428
428
429 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
429 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
430 """post process the list of new heads with obsolescence information
430 """post process the list of new heads with obsolescence information
431
431
432 Exists as a sub-function to contain the complexity and allow extensions to
432 Exists as a sub-function to contain the complexity and allow extensions to
433 experiment with smarter logic.
433 experiment with smarter logic.
434
434
435 Returns (newheads, discarded_heads) tuple
435 Returns (newheads, discarded_heads) tuple
436 """
436 """
437 # known issues
437 # known issues
438 #
438 #
439 # * We "silently" skip processing on all changesets unknown locally
439 # * We "silently" skip processing on all changesets unknown locally
440 #
440 #
441 # * if <nh> is public on the remote, it won't be affected by obsolescence
441 # * if <nh> is public on the remote, it won't be affected by obsolescence
442 # markers and a new head is created
442 # markers and a new head is created
443
443
444 # define various utilities and containers
444 # define various utilities and containers
445 repo = pushop.repo
445 repo = pushop.repo
446 unfi = repo.unfiltered()
446 unfi = repo.unfiltered()
447 tonode = unfi.changelog.node
447 tonode = unfi.changelog.node
448 torev = unfi.changelog.nodemap.get
448 torev = unfi.changelog.nodemap.get
449 public = phases.public
449 public = phases.public
450 getphase = unfi._phasecache.phase
450 getphase = unfi._phasecache.phase
451 ispublic = (lambda r: getphase(unfi, r) == public)
451 ispublic = (lambda r: getphase(unfi, r) == public)
452 ispushed = (lambda n: torev(n) in futurecommon)
452 ispushed = (lambda n: torev(n) in futurecommon)
453 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
453 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
454 successorsmarkers = unfi.obsstore.successors
454 successorsmarkers = unfi.obsstore.successors
455 newhs = set() # final set of new heads
455 newhs = set() # final set of new heads
456 discarded = set() # new heads of fully replaced branches
456 discarded = set() # new heads of fully replaced branches
457
457
458 localcandidate = set() # candidate heads known locally
458 localcandidate = set() # candidate heads known locally
459 unknownheads = set() # candidate heads unknown locally
459 unknownheads = set() # candidate heads unknown locally
460 for h in candidate_newhs:
460 for h in candidate_newhs:
461 if h in unfi:
461 if h in unfi:
462 localcandidate.add(h)
462 localcandidate.add(h)
463 else:
463 else:
464 if successorsmarkers.get(h) is not None:
464 if successorsmarkers.get(h) is not None:
465 msg = ('checkheads: remote head unknown locally has'
465 msg = ('checkheads: remote head unknown locally has'
466 ' local marker: %s\n')
466 ' local marker: %s\n')
467 repo.ui.debug(msg % hex(h))
467 repo.ui.debug(msg % hex(h))
468 unknownheads.add(h)
468 unknownheads.add(h)
469
469
470 # fast path the simple case
470 # fast path the simple case
471 if len(localcandidate) == 1:
471 if len(localcandidate) == 1:
472 return unknownheads | set(candidate_newhs), set()
472 return unknownheads | set(candidate_newhs), set()
473
473
474 # actually process branch replacement
474 # actually process branch replacement
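474 # a candidate head is kept unless every changeset exclusive to it
474 # a candidate head is kept unless every changeset exclusive to it
474 # is non-public, outside the future common set, and covered by an
474 # is non-public, outside the future common set, and covered by an
474 # obsolescence marker that will be pushed
474 # obsolescence marker that will be pushed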
475 while localcandidate:
475 while localcandidate:
476 nh = localcandidate.pop()
476 nh = localcandidate.pop()
477 # run this check early to skip the evaluation of the whole branch
477 # run this check early to skip the evaluation of the whole branch
478 if (torev(nh) in futurecommon or ispublic(torev(nh))):
478 if (torev(nh) in futurecommon or ispublic(torev(nh))):
479 newhs.add(nh)
479 newhs.add(nh)
480 continue
480 continue
481
481
482 # Get all revs/nodes on the branch exclusive to this head
482 # Get all revs/nodes on the branch exclusive to this head
483 # (already filtered heads are "ignored")
483 # (already filtered heads are "ignored")
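483 # only(%n, ...) selects the ancestors of nh (nh included) that
483 # only(%n, ...) selects the ancestors of nh (nh included) that
483 # are not ancestors of any other candidate or accepted head
483 # are not ancestors of any other candidate or accepted head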
484 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
484 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
485 nh, localcandidate, newhs)
485 nh, localcandidate, newhs)
486 branchnodes = [tonode(r) for r in branchrevs]
486 branchnodes = [tonode(r) for r in branchrevs]
487
487
488 # The branch won't be hidden on the remote if
488 # The branch won't be hidden on the remote if
489 # * any part of it is public,
489 # * any part of it is public,
490 # * any part of it is considered part of the result by previous logic,
490 # * any part of it is considered part of the result by previous logic,
491 # * some part of it has no marker to push to obsolete it.
491 # * some part of it has no marker to push to obsolete it.
492 if (any(ispublic(r) for r in branchrevs)
492 if (any(ispublic(r) for r in branchrevs)
493 or any(torev(n) in futurecommon for n in branchnodes)
493 or any(torev(n) in futurecommon for n in branchnodes)
494 or any(not hasoutmarker(n) for n in branchnodes)):
494 or any(not hasoutmarker(n) for n in branchnodes)):
495 newhs.add(nh)
495 newhs.add(nh)
496 else:
496 else:
497 # note: there is a corner case if there is a merge in the branch.
497 # note: there is a corner case if there is a merge in the branch.
498 # we might end up with -more- heads. However, these heads are not
498 # we might end up with -more- heads. However, these heads are not
499 # "added" by the push, but rather by the "removal" on the remote, so
499 # "added" by the push, but rather by the "removal" on the remote, so
500 # I think it is okay to ignore them.
500 # I think it is okay to ignore them.
501 discarded.add(nh)
501 discarded.add(nh)
502 newhs |= unknownheads
502 newhs |= unknownheads
503 return newhs, discarded
503 return newhs, discarded
504
504
505 def pushingmarkerfor(obsstore, ispushed, node):
505 def pushingmarkerfor(obsstore, ispushed, node):
506 """true if some markers are to be pushed for node
506 """true if some markers are to be pushed for node
507
507
508 We cannot just look into the pushed obsmarkers from the pushop because
508 We cannot just look into the pushed obsmarkers from the pushop because
509 discovery might have filtered relevant markers. In addition, listing all
509 discovery might have filtered relevant markers. In addition, listing all
510 markers relevant to all changesets in the pushed set would be too expensive
510 markers relevant to all changesets in the pushed set would be too expensive
511 (O(len(repo)))
511 (O(len(repo)))
512
512
513 (note: there are caching opportunities in this function, but they would
513 (note: there are caching opportunities in this function, but they would
514 require a two-dimensional stack.)
514 require a two-dimensional stack.)
515 """
515 """
516 successorsmarkers = obsstore.successors
516 successorsmarkers = obsstore.successors
517 stack = [node]
517 stack = [node]
518 seen = set(stack)
518 seen = set(stack)
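518 # walk the obsolescence graph from node through successors (or
518 # walk the obsolescence graph from node through successors (or
518 # through parents for prune markers); reaching a pushed node means
518 # through parents for prune markers); reaching a pushed node means
518 # a relevant marker will be sent
518 # a relevant marker will be sent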
519 while stack:
519 while stack:
520 current = stack.pop()
520 current = stack.pop()
521 if ispushed(current):
521 if ispushed(current):
522 return True
522 return True
523 markers = successorsmarkers.get(current, ())
523 markers = successorsmarkers.get(current, ())
524 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
524 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
525 for m in markers:
525 for m in markers:
526 nexts = m[1] # successors
526 nexts = m[1] # successors
527 if not nexts: # this is a prune marker
527 if not nexts: # this is a prune marker
528 nexts = m[5] or () # parents
528 nexts = m[5] or () # parents
529 for n in nexts:
529 for n in nexts:
530 if n not in seen:
530 if n not in seen:
531 seen.add(n)
531 seen.add(n)
532 stack.append(n)
532 stack.append(n)
533 return False
533 return False