##// END OF EJS Templates
bookmarks: remove changectx() method from bmstore (API)...
Augie Fackler -
r43248:e3bb2a58 default
parent child Browse files
Show More
@@ -1,115 +1,116 b''
1 1 # Copyright 2017 Facebook, Inc.
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 from mercurial.i18n import _
9 9
10 10 from mercurial import (
11 11 bundle2,
12 12 changegroup,
13 13 error,
14 14 extensions,
15 node as nodemod,
15 16 revsetlang,
16 17 util,
17 18 )
18 19
19 20 from . import common
20 21
21 22 isremotebooksenabled = common.isremotebooksenabled
22 23
23 24 scratchbranchparttype = 'b2x:infinitepush'
24 25
25 26 def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
26 27 if not outgoing.missing:
27 28 raise error.Abort(_('no commits to push'))
28 29
29 30 if scratchbranchparttype not in bundle2.bundle2caps(peer):
30 31 raise error.Abort(_('no server support for %r') % scratchbranchparttype)
31 32
32 33 _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
33 34 bookmark)
34 35
35 36 supportedversions = changegroup.supportedoutgoingversions(repo)
36 37 # Explicitly avoid using '01' changegroup version in infinitepush to
37 38 # support general delta
38 39 supportedversions.discard('01')
39 40 cgversion = min(supportedversions)
40 41 _handlelfs(repo, outgoing.missing)
41 42 cg = changegroup.makestream(repo, outgoing, cgversion, 'push')
42 43
43 44 params = {}
44 45 params['cgversion'] = cgversion
45 46 if bookmark:
46 47 params['bookmark'] = bookmark
47 48 # 'prevbooknode' is necessary for pushkey reply part
48 49 params['bookprevnode'] = ''
49 50 bookmarks = repo._bookmarks
50 51 if bookmark in bookmarks:
51 params['bookprevnode'] = bookmarks.changectx(bookmark).hex()
52 params['bookprevnode'] = nodemod.hex(bookmarks[bookmark])
52 53
53 54 # Do not send pushback bundle2 part with bookmarks if remotenames extension
54 55 # is enabled. It will be handled manually in `_push()`
55 56 if not isremotebooksenabled(ui):
56 57 params['pushbackbookmarks'] = '1'
57 58
58 59 parts = []
59 60
60 61 # .upper() marks this as a mandatory part: server will abort if there's no
61 62 # handler
62 63 parts.append(bundle2.bundlepart(
63 64 scratchbranchparttype.upper(),
64 65 advisoryparams=params.iteritems(),
65 66 data=cg))
66 67
67 68 return parts
68 69
69 70 def _validaterevset(repo, revset, bookmark):
70 71 """Abort if the revs to be pushed aren't valid for a scratch branch."""
71 72 if not repo.revs(revset):
72 73 raise error.Abort(_('nothing to push'))
73 74 if bookmark:
74 75 # Allow bundle with many heads only if no bookmark is specified
75 76 heads = repo.revs('heads(%r)', revset)
76 77 if len(heads) > 1:
77 78 raise error.Abort(
78 79 _('cannot push more than one head to a scratch branch'))
79 80
80 81 def _handlelfs(repo, missing):
81 82 '''Special case if lfs is enabled
82 83
83 84 If lfs is enabled then we need to call prepush hook
84 85 to make sure large files are uploaded to lfs
85 86 '''
86 87 try:
87 88 lfsmod = extensions.find('lfs')
88 89 lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
89 90 except KeyError:
90 91 # Ignore if lfs extension is not enabled
91 92 return
92 93
93 94 class copiedpart(object):
94 95 """a copy of unbundlepart content that can be consumed later"""
95 96
96 97 def __init__(self, part):
97 98 # copy "public properties"
98 99 self.type = part.type
99 100 self.id = part.id
100 101 self.mandatory = part.mandatory
101 102 self.mandatoryparams = part.mandatoryparams
102 103 self.advisoryparams = part.advisoryparams
103 104 self.params = part.params
104 105 self.mandatorykeys = part.mandatorykeys
105 106 # copy the buffer
106 107 self._io = util.stringio(part.read())
107 108
108 109 def consume(self):
109 110 return
110 111
111 112 def read(self, size=None):
112 113 if size is None:
113 114 return self._io.read()
114 115 else:
115 116 return self._io.read(size)
@@ -1,959 +1,955 b''
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import struct
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 short,
18 18 wdirid,
19 19 )
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 obsutil,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 # label constants
31 31 # until 3.5, bookmarks.current was the advertised name, not
32 32 # bookmarks.active, so we must use both to avoid breaking old
33 33 # custom styles
34 34 activebookmarklabel = 'bookmarks.active bookmarks.current'
35 35
36 36 BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore'
37 37
38 38 def bookmarksinstore(repo):
39 39 return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
40 40
41 41 def bookmarksvfs(repo):
42 42 return repo.svfs if bookmarksinstore(repo) else repo.vfs
43 43
44 44 def _getbkfile(repo):
45 45 """Hook so that extensions that mess with the store can hook bm storage.
46 46
47 47 For core, this just handles wether we should see pending
48 48 bookmarks or the committed ones. Other extensions (like share)
49 49 may need to tweak this behavior further.
50 50 """
51 51 fp, pending = txnutil.trypending(repo.root, bookmarksvfs(repo), 'bookmarks')
52 52 return fp
53 53
54 54 class bmstore(object):
55 55 r"""Storage for bookmarks.
56 56
57 57 This object should do all bookmark-related reads and writes, so
58 58 that it's fairly simple to replace the storage underlying
59 59 bookmarks without having to clone the logic surrounding
60 60 bookmarks. This type also should manage the active bookmark, if
61 61 any.
62 62
63 63 This particular bmstore implementation stores bookmarks as
64 64 {hash}\s{name}\n (the same format as localtags) in
65 65 .hg/bookmarks. The mapping is stored as {name: nodeid}.
66 66 """
67 67
68 68 def __init__(self, repo):
69 69 self._repo = repo
70 70 self._refmap = refmap = {} # refspec: node
71 71 self._nodemap = nodemap = {} # node: sorted([refspec, ...])
72 72 self._clean = True
73 73 self._aclean = True
74 74 nm = repo.changelog.nodemap
75 75 tonode = bin # force local lookup
76 76 try:
77 77 with _getbkfile(repo) as bkfile:
78 78 for line in bkfile:
79 79 line = line.strip()
80 80 if not line:
81 81 continue
82 82 try:
83 83 sha, refspec = line.split(' ', 1)
84 84 node = tonode(sha)
85 85 if node in nm:
86 86 refspec = encoding.tolocal(refspec)
87 87 refmap[refspec] = node
88 88 nrefs = nodemap.get(node)
89 89 if nrefs is None:
90 90 nodemap[node] = [refspec]
91 91 else:
92 92 nrefs.append(refspec)
93 93 if nrefs[-2] > refspec:
94 94 # bookmarks weren't sorted before 4.5
95 95 nrefs.sort()
96 96 except (TypeError, ValueError):
97 97 # TypeError:
98 98 # - bin(...)
99 99 # ValueError:
100 100 # - node in nm, for non-20-bytes entry
101 101 # - split(...), for string without ' '
102 102 bookmarkspath = '.hg/bookmarks'
103 103 if bookmarksinstore(repo):
104 104 bookmarkspath = '.hg/store/bookmarks'
105 105 repo.ui.warn(_('malformed line in %s: %r\n')
106 106 % (bookmarkspath, pycompat.bytestr(line)))
107 107 except IOError as inst:
108 108 if inst.errno != errno.ENOENT:
109 109 raise
110 110 self._active = _readactive(repo, self)
111 111
112 112 @property
113 113 def active(self):
114 114 return self._active
115 115
116 116 @active.setter
117 117 def active(self, mark):
118 118 if mark is not None and mark not in self._refmap:
119 119 raise AssertionError('bookmark %s does not exist!' % mark)
120 120
121 121 self._active = mark
122 122 self._aclean = False
123 123
124 124 def __len__(self):
125 125 return len(self._refmap)
126 126
127 127 def __iter__(self):
128 128 return iter(self._refmap)
129 129
130 130 def iteritems(self):
131 131 return self._refmap.iteritems()
132 132
133 133 def items(self):
134 134 return self._refmap.items()
135 135
136 136 # TODO: maybe rename to allnames()?
137 137 def keys(self):
138 138 return self._refmap.keys()
139 139
140 140 # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
141 141 # could be self._nodemap.keys()
142 142 def values(self):
143 143 return self._refmap.values()
144 144
145 145 def __contains__(self, mark):
146 146 return mark in self._refmap
147 147
148 148 def __getitem__(self, mark):
149 149 return self._refmap[mark]
150 150
151 151 def get(self, mark, default=None):
152 152 return self._refmap.get(mark, default)
153 153
154 154 def _set(self, mark, node):
155 155 self._clean = False
156 156 if mark in self._refmap:
157 157 self._del(mark)
158 158 self._refmap[mark] = node
159 159 nrefs = self._nodemap.get(node)
160 160 if nrefs is None:
161 161 self._nodemap[node] = [mark]
162 162 else:
163 163 nrefs.append(mark)
164 164 nrefs.sort()
165 165
166 166 def _del(self, mark):
167 167 self._clean = False
168 168 node = self._refmap.pop(mark)
169 169 nrefs = self._nodemap[node]
170 170 if len(nrefs) == 1:
171 171 assert nrefs[0] == mark
172 172 del self._nodemap[node]
173 173 else:
174 174 nrefs.remove(mark)
175 175
176 176 def names(self, node):
177 177 """Return a sorted list of bookmarks pointing to the specified node"""
178 178 return self._nodemap.get(node, [])
179 179
180 def changectx(self, mark):
181 node = self._refmap[mark]
182 return self._repo[node]
183
184 180 def applychanges(self, repo, tr, changes):
185 181 """Apply a list of changes to bookmarks
186 182 """
187 183 bmchanges = tr.changes.get('bookmarks')
188 184 for name, node in changes:
189 185 old = self._refmap.get(name)
190 186 if node is None:
191 187 self._del(name)
192 188 else:
193 189 self._set(name, node)
194 190 if bmchanges is not None:
195 191 # if a previous value exist preserve the "initial" value
196 192 previous = bmchanges.get(name)
197 193 if previous is not None:
198 194 old = previous[0]
199 195 bmchanges[name] = (old, node)
200 196 self._recordchange(tr)
201 197
202 198 def _recordchange(self, tr):
203 199 """record that bookmarks have been changed in a transaction
204 200
205 201 The transaction is then responsible for updating the file content."""
206 202 location = '' if bookmarksinstore(self._repo) else 'plain'
207 203 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
208 204 location=location)
209 205 tr.hookargs['bookmark_moved'] = '1'
210 206
211 207 def _writerepo(self, repo):
212 208 """Factored out for extensibility"""
213 209 rbm = repo._bookmarks
214 210 if rbm.active not in self._refmap:
215 211 rbm.active = None
216 212 rbm._writeactive()
217 213
218 214 if bookmarksinstore(repo):
219 215 vfs = repo.svfs
220 216 lock = repo.lock()
221 217 else:
222 218 vfs = repo.vfs
223 219 lock = repo.wlock()
224 220 with lock:
225 221 with vfs('bookmarks', 'w', atomictemp=True, checkambig=True) as f:
226 222 self._write(f)
227 223
228 224 def _writeactive(self):
229 225 if self._aclean:
230 226 return
231 227 with self._repo.wlock():
232 228 if self._active is not None:
233 229 with self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
234 230 checkambig=True) as f:
235 231 f.write(encoding.fromlocal(self._active))
236 232 else:
237 233 self._repo.vfs.tryunlink('bookmarks.current')
238 234 self._aclean = True
239 235
240 236 def _write(self, fp):
241 237 for name, node in sorted(self._refmap.iteritems()):
242 238 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
243 239 self._clean = True
244 240 self._repo.invalidatevolatilesets()
245 241
246 242 def expandname(self, bname):
247 243 if bname == '.':
248 244 if self.active:
249 245 return self.active
250 246 else:
251 247 raise error.RepoLookupError(_("no active bookmark"))
252 248 return bname
253 249
254 250 def checkconflict(self, mark, force=False, target=None):
255 251 """check repo for a potential clash of mark with an existing bookmark,
256 252 branch, or hash
257 253
258 254 If target is supplied, then check that we are moving the bookmark
259 255 forward.
260 256
261 257 If force is supplied, then forcibly move the bookmark to a new commit
262 258 regardless if it is a move forward.
263 259
264 260 If divergent bookmark are to be deleted, they will be returned as list.
265 261 """
266 262 cur = self._repo['.'].node()
267 263 if mark in self._refmap and not force:
268 264 if target:
269 265 if self._refmap[mark] == target and target == cur:
270 266 # re-activating a bookmark
271 267 return []
272 268 rev = self._repo[target].rev()
273 269 anc = self._repo.changelog.ancestors([rev])
274 bmctx = self.changectx(mark)
270 bmctx = self._repo[self[mark]]
275 271 divs = [self._refmap[b] for b in self._refmap
276 272 if b.split('@', 1)[0] == mark.split('@', 1)[0]]
277 273
278 274 # allow resolving a single divergent bookmark even if moving
279 275 # the bookmark across branches when a revision is specified
280 276 # that contains a divergent bookmark
281 277 if bmctx.rev() not in anc and target in divs:
282 278 return divergent2delete(self._repo, [target], mark)
283 279
284 280 deletefrom = [b for b in divs
285 281 if self._repo[b].rev() in anc or b == target]
286 282 delbms = divergent2delete(self._repo, deletefrom, mark)
287 283 if validdest(self._repo, bmctx, self._repo[target]):
288 284 self._repo.ui.status(
289 285 _("moving bookmark '%s' forward from %s\n") %
290 286 (mark, short(bmctx.node())))
291 287 return delbms
292 288 raise error.Abort(_("bookmark '%s' already exists "
293 289 "(use -f to force)") % mark)
294 290 if ((mark in self._repo.branchmap() or
295 291 mark == self._repo.dirstate.branch()) and not force):
296 292 raise error.Abort(
297 293 _("a bookmark cannot have the name of an existing branch"))
298 294 if len(mark) > 3 and not force:
299 295 try:
300 296 shadowhash = scmutil.isrevsymbol(self._repo, mark)
301 297 except error.LookupError: # ambiguous identifier
302 298 shadowhash = False
303 299 if shadowhash:
304 300 self._repo.ui.warn(
305 301 _("bookmark %s matches a changeset hash\n"
306 302 "(did you leave a -r out of an 'hg bookmark' "
307 303 "command?)\n")
308 304 % mark)
309 305 return []
310 306
311 307 def _readactive(repo, marks):
312 308 """
313 309 Get the active bookmark. We can have an active bookmark that updates
314 310 itself as we commit. This function returns the name of that bookmark.
315 311 It is stored in .hg/bookmarks.current
316 312 """
317 313 # No readline() in osutil.posixfile, reading everything is
318 314 # cheap.
319 315 content = repo.vfs.tryread('bookmarks.current')
320 316 mark = encoding.tolocal((content.splitlines() or [''])[0])
321 317 if mark == '' or mark not in marks:
322 318 mark = None
323 319 return mark
324 320
325 321 def activate(repo, mark):
326 322 """
327 323 Set the given bookmark to be 'active', meaning that this bookmark will
328 324 follow new commits that are made.
329 325 The name is recorded in .hg/bookmarks.current
330 326 """
331 327 repo._bookmarks.active = mark
332 328 repo._bookmarks._writeactive()
333 329
334 330 def deactivate(repo):
335 331 """
336 332 Unset the active bookmark in this repository.
337 333 """
338 334 repo._bookmarks.active = None
339 335 repo._bookmarks._writeactive()
340 336
341 337 def isactivewdirparent(repo):
342 338 """
343 339 Tell whether the 'active' bookmark (the one that follows new commits)
344 340 points to one of the parents of the current working directory (wdir).
345 341
346 342 While this is normally the case, it can on occasion be false; for example,
347 343 immediately after a pull, the active bookmark can be moved to point
348 344 to a place different than the wdir. This is solved by running `hg update`.
349 345 """
350 346 mark = repo._activebookmark
351 347 marks = repo._bookmarks
352 348 parents = [p.node() for p in repo[None].parents()]
353 349 return (mark in marks and marks[mark] in parents)
354 350
355 351 def divergent2delete(repo, deletefrom, bm):
356 352 """find divergent versions of bm on nodes in deletefrom.
357 353
358 354 the list of bookmark to delete."""
359 355 todelete = []
360 356 marks = repo._bookmarks
361 357 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
362 358 for mark in divergent:
363 359 if mark == '@' or '@' not in mark:
364 360 # can't be divergent by definition
365 361 continue
366 362 if mark and marks[mark] in deletefrom:
367 363 if mark != bm:
368 364 todelete.append(mark)
369 365 return todelete
370 366
371 367 def headsforactive(repo):
372 368 """Given a repo with an active bookmark, return divergent bookmark nodes.
373 369
374 370 Args:
375 371 repo: A repository with an active bookmark.
376 372
377 373 Returns:
378 374 A list of binary node ids that is the full list of other
379 375 revisions with bookmarks divergent from the active bookmark. If
380 376 there were no divergent bookmarks, then this list will contain
381 377 only one entry.
382 378 """
383 379 if not repo._activebookmark:
384 380 raise ValueError(
385 381 'headsforactive() only makes sense with an active bookmark')
386 382 name = repo._activebookmark.split('@', 1)[0]
387 383 heads = []
388 384 for mark, n in repo._bookmarks.iteritems():
389 385 if mark.split('@', 1)[0] == name:
390 386 heads.append(n)
391 387 return heads
392 388
393 389 def calculateupdate(ui, repo):
394 390 '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
395 391 and where to move the active bookmark from, if needed.'''
396 392 checkout, movemarkfrom = None, None
397 393 activemark = repo._activebookmark
398 394 if isactivewdirparent(repo):
399 395 movemarkfrom = repo['.'].node()
400 396 elif activemark:
401 397 ui.status(_("updating to active bookmark %s\n") % activemark)
402 398 checkout = activemark
403 399 return (checkout, movemarkfrom)
404 400
405 401 def update(repo, parents, node):
406 402 deletefrom = parents
407 403 marks = repo._bookmarks
408 404 active = marks.active
409 405 if not active:
410 406 return False
411 407
412 408 bmchanges = []
413 409 if marks[active] in parents:
414 410 new = repo[node]
415 divs = [marks.changectx(b) for b in marks
411 divs = [repo[marks[b]] for b in marks
416 412 if b.split('@', 1)[0] == active.split('@', 1)[0]]
417 413 anc = repo.changelog.ancestors([new.rev()])
418 414 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
419 if validdest(repo, marks.changectx(active), new):
415 if validdest(repo, repo[marks[active]], new):
420 416 bmchanges.append((active, new.node()))
421 417
422 418 for bm in divergent2delete(repo, deletefrom, active):
423 419 bmchanges.append((bm, None))
424 420
425 421 if bmchanges:
426 422 with repo.lock(), repo.transaction('bookmark') as tr:
427 423 marks.applychanges(repo, tr, bmchanges)
428 424 return bool(bmchanges)
429 425
430 426 def listbinbookmarks(repo):
431 427 # We may try to list bookmarks on a repo type that does not
432 428 # support it (e.g., statichttprepository).
433 429 marks = getattr(repo, '_bookmarks', {})
434 430
435 431 hasnode = repo.changelog.hasnode
436 432 for k, v in marks.iteritems():
437 433 # don't expose local divergent bookmarks
438 434 if hasnode(v) and ('@' not in k or k.endswith('@')):
439 435 yield k, v
440 436
441 437 def listbookmarks(repo):
442 438 d = {}
443 439 for book, node in listbinbookmarks(repo):
444 440 d[book] = hex(node)
445 441 return d
446 442
447 443 def pushbookmark(repo, key, old, new):
448 444 if bookmarksinstore(repo):
449 445 wlock = util.nullcontextmanager()
450 446 else:
451 447 wlock = repo.wlock()
452 448 with wlock, repo.lock(), repo.transaction('bookmarks') as tr:
453 449 marks = repo._bookmarks
454 450 existing = hex(marks.get(key, ''))
455 451 if existing != old and existing != new:
456 452 return False
457 453 if new == '':
458 454 changes = [(key, None)]
459 455 else:
460 456 if new not in repo:
461 457 return False
462 458 changes = [(key, repo[new].node())]
463 459 marks.applychanges(repo, tr, changes)
464 460 return True
465 461
466 462 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
467 463 '''Compare bookmarks between srcmarks and dstmarks
468 464
469 465 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
470 466 differ, invalid)", each are list of bookmarks below:
471 467
472 468 :addsrc: added on src side (removed on dst side, perhaps)
473 469 :adddst: added on dst side (removed on src side, perhaps)
474 470 :advsrc: advanced on src side
475 471 :advdst: advanced on dst side
476 472 :diverge: diverge
477 473 :differ: changed, but changeset referred on src is unknown on dst
478 474 :invalid: unknown on both side
479 475 :same: same on both side
480 476
481 477 Each elements of lists in result tuple is tuple "(bookmark name,
482 478 changeset ID on source side, changeset ID on destination
483 479 side)". Each changeset ID is a binary node or None.
484 480
485 481 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
486 482 "invalid" list may be unknown for repo.
487 483
488 484 If "targets" is specified, only bookmarks listed in it are
489 485 examined.
490 486 '''
491 487
492 488 if targets:
493 489 bset = set(targets)
494 490 else:
495 491 srcmarkset = set(srcmarks)
496 492 dstmarkset = set(dstmarks)
497 493 bset = srcmarkset | dstmarkset
498 494
499 495 results = ([], [], [], [], [], [], [], [])
500 496 addsrc = results[0].append
501 497 adddst = results[1].append
502 498 advsrc = results[2].append
503 499 advdst = results[3].append
504 500 diverge = results[4].append
505 501 differ = results[5].append
506 502 invalid = results[6].append
507 503 same = results[7].append
508 504
509 505 for b in sorted(bset):
510 506 if b not in srcmarks:
511 507 if b in dstmarks:
512 508 adddst((b, None, dstmarks[b]))
513 509 else:
514 510 invalid((b, None, None))
515 511 elif b not in dstmarks:
516 512 addsrc((b, srcmarks[b], None))
517 513 else:
518 514 scid = srcmarks[b]
519 515 dcid = dstmarks[b]
520 516 if scid == dcid:
521 517 same((b, scid, dcid))
522 518 elif scid in repo and dcid in repo:
523 519 sctx = repo[scid]
524 520 dctx = repo[dcid]
525 521 if sctx.rev() < dctx.rev():
526 522 if validdest(repo, sctx, dctx):
527 523 advdst((b, scid, dcid))
528 524 else:
529 525 diverge((b, scid, dcid))
530 526 else:
531 527 if validdest(repo, dctx, sctx):
532 528 advsrc((b, scid, dcid))
533 529 else:
534 530 diverge((b, scid, dcid))
535 531 else:
536 532 # it is too expensive to examine in detail, in this case
537 533 differ((b, scid, dcid))
538 534
539 535 return results
540 536
541 537 def _diverge(ui, b, path, localmarks, remotenode):
542 538 '''Return appropriate diverged bookmark for specified ``path``
543 539
544 540 This returns None, if it is failed to assign any divergent
545 541 bookmark name.
546 542
547 543 This reuses already existing one with "@number" suffix, if it
548 544 refers ``remotenode``.
549 545 '''
550 546 if b == '@':
551 547 b = ''
552 548 # try to use an @pathalias suffix
553 549 # if an @pathalias already exists, we overwrite (update) it
554 550 if path.startswith("file:"):
555 551 path = util.url(path).path
556 552 for p, u in ui.configitems("paths"):
557 553 if u.startswith("file:"):
558 554 u = util.url(u).path
559 555 if path == u:
560 556 return '%s@%s' % (b, p)
561 557
562 558 # assign a unique "@number" suffix newly
563 559 for x in range(1, 100):
564 560 n = '%s@%d' % (b, x)
565 561 if n not in localmarks or localmarks[n] == remotenode:
566 562 return n
567 563
568 564 return None
569 565
570 566 def unhexlifybookmarks(marks):
571 567 binremotemarks = {}
572 568 for name, node in marks.items():
573 569 binremotemarks[name] = bin(node)
574 570 return binremotemarks
575 571
576 572 _binaryentry = struct.Struct('>20sH')
577 573
578 574 def binaryencode(bookmarks):
579 575 """encode a '(bookmark, node)' iterable into a binary stream
580 576
581 577 the binary format is:
582 578
583 579 <node><bookmark-length><bookmark-name>
584 580
585 581 :node: is a 20 bytes binary node,
586 582 :bookmark-length: an unsigned short,
587 583 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
588 584
589 585 wdirid (all bits set) will be used as a special value for "missing"
590 586 """
591 587 binarydata = []
592 588 for book, node in bookmarks:
593 589 if not node: # None or ''
594 590 node = wdirid
595 591 binarydata.append(_binaryentry.pack(node, len(book)))
596 592 binarydata.append(book)
597 593 return ''.join(binarydata)
598 594
599 595 def binarydecode(stream):
600 596 """decode a binary stream into an '(bookmark, node)' iterable
601 597
602 598 the binary format is:
603 599
604 600 <node><bookmark-length><bookmark-name>
605 601
606 602 :node: is a 20 bytes binary node,
607 603 :bookmark-length: an unsigned short,
608 604 :bookmark-name: the name of the bookmark (of length <bookmark-length>))
609 605
610 606 wdirid (all bits set) will be used as a special value for "missing"
611 607 """
612 608 entrysize = _binaryentry.size
613 609 books = []
614 610 while True:
615 611 entry = stream.read(entrysize)
616 612 if len(entry) < entrysize:
617 613 if entry:
618 614 raise error.Abort(_('bad bookmark stream'))
619 615 break
620 616 node, length = _binaryentry.unpack(entry)
621 617 bookmark = stream.read(length)
622 618 if len(bookmark) < length:
623 619 if entry:
624 620 raise error.Abort(_('bad bookmark stream'))
625 621 if node == wdirid:
626 622 node = None
627 623 books.append((bookmark, node))
628 624 return books
629 625
630 626 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
631 627 ui.debug("checking for updated bookmarks\n")
632 628 localmarks = repo._bookmarks
633 629 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
634 630 ) = comparebookmarks(repo, remotemarks, localmarks)
635 631
636 632 status = ui.status
637 633 warn = ui.warn
638 634 if ui.configbool('ui', 'quietbookmarkmove'):
639 635 status = warn = ui.debug
640 636
641 637 explicit = set(explicit)
642 638 changed = []
643 639 for b, scid, dcid in addsrc:
644 640 if scid in repo: # add remote bookmarks for changes we already have
645 641 changed.append((b, scid, status,
646 642 _("adding remote bookmark %s\n") % (b)))
647 643 elif b in explicit:
648 644 explicit.remove(b)
649 645 ui.warn(_("remote bookmark %s points to locally missing %s\n")
650 646 % (b, hex(scid)[:12]))
651 647
652 648 for b, scid, dcid in advsrc:
653 649 changed.append((b, scid, status,
654 650 _("updating bookmark %s\n") % (b)))
655 651 # remove normal movement from explicit set
656 652 explicit.difference_update(d[0] for d in changed)
657 653
658 654 for b, scid, dcid in diverge:
659 655 if b in explicit:
660 656 explicit.discard(b)
661 657 changed.append((b, scid, status,
662 658 _("importing bookmark %s\n") % (b)))
663 659 else:
664 660 db = _diverge(ui, b, path, localmarks, scid)
665 661 if db:
666 662 changed.append((db, scid, warn,
667 663 _("divergent bookmark %s stored as %s\n") %
668 664 (b, db)))
669 665 else:
670 666 warn(_("warning: failed to assign numbered name "
671 667 "to divergent bookmark %s\n") % (b))
672 668 for b, scid, dcid in adddst + advdst:
673 669 if b in explicit:
674 670 explicit.discard(b)
675 671 changed.append((b, scid, status,
676 672 _("importing bookmark %s\n") % (b)))
677 673 for b, scid, dcid in differ:
678 674 if b in explicit:
679 675 explicit.remove(b)
680 676 ui.warn(_("remote bookmark %s points to locally missing %s\n")
681 677 % (b, hex(scid)[:12]))
682 678
683 679 if changed:
684 680 tr = trfunc()
685 681 changes = []
686 682 for b, node, writer, msg in sorted(changed):
687 683 changes.append((b, node))
688 684 writer(msg)
689 685 localmarks.applychanges(repo, tr, changes)
690 686
691 687 def incoming(ui, repo, peer):
692 688 '''Show bookmarks incoming from other to repo
693 689 '''
694 690 ui.status(_("searching for changed bookmarks\n"))
695 691
696 692 with peer.commandexecutor() as e:
697 693 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
698 694 'namespace': 'bookmarks',
699 695 }).result())
700 696
701 697 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
702 698 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
703 699
704 700 incomings = []
705 701 if ui.debugflag:
706 702 getid = lambda id: id
707 703 else:
708 704 getid = lambda id: id[:12]
709 705 if ui.verbose:
710 706 def add(b, id, st):
711 707 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
712 708 else:
713 709 def add(b, id, st):
714 710 incomings.append(" %-25s %s\n" % (b, getid(id)))
715 711 for b, scid, dcid in addsrc:
716 712 # i18n: "added" refers to a bookmark
717 713 add(b, hex(scid), _('added'))
718 714 for b, scid, dcid in advsrc:
719 715 # i18n: "advanced" refers to a bookmark
720 716 add(b, hex(scid), _('advanced'))
721 717 for b, scid, dcid in diverge:
722 718 # i18n: "diverged" refers to a bookmark
723 719 add(b, hex(scid), _('diverged'))
724 720 for b, scid, dcid in differ:
725 721 # i18n: "changed" refers to a bookmark
726 722 add(b, hex(scid), _('changed'))
727 723
728 724 if not incomings:
729 725 ui.status(_("no changed bookmarks found\n"))
730 726 return 1
731 727
732 728 for s in sorted(incomings):
733 729 ui.write(s)
734 730
735 731 return 0
736 732
737 733 def outgoing(ui, repo, other):
738 734 '''Show bookmarks outgoing from repo to other
739 735 '''
740 736 ui.status(_("searching for changed bookmarks\n"))
741 737
742 738 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
743 739 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
744 740 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
745 741
746 742 outgoings = []
747 743 if ui.debugflag:
748 744 getid = lambda id: id
749 745 else:
750 746 getid = lambda id: id[:12]
751 747 if ui.verbose:
752 748 def add(b, id, st):
753 749 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
754 750 else:
755 751 def add(b, id, st):
756 752 outgoings.append(" %-25s %s\n" % (b, getid(id)))
757 753 for b, scid, dcid in addsrc:
758 754 # i18n: "added refers to a bookmark
759 755 add(b, hex(scid), _('added'))
760 756 for b, scid, dcid in adddst:
761 757 # i18n: "deleted" refers to a bookmark
762 758 add(b, ' ' * 40, _('deleted'))
763 759 for b, scid, dcid in advsrc:
764 760 # i18n: "advanced" refers to a bookmark
765 761 add(b, hex(scid), _('advanced'))
766 762 for b, scid, dcid in diverge:
767 763 # i18n: "diverged" refers to a bookmark
768 764 add(b, hex(scid), _('diverged'))
769 765 for b, scid, dcid in differ:
770 766 # i18n: "changed" refers to a bookmark
771 767 add(b, hex(scid), _('changed'))
772 768
773 769 if not outgoings:
774 770 ui.status(_("no changed bookmarks found\n"))
775 771 return 1
776 772
777 773 for s in sorted(outgoings):
778 774 ui.write(s)
779 775
780 776 return 0
781 777
782 778 def summary(repo, peer):
783 779 '''Compare bookmarks between repo and other for "hg summary" output
784 780
785 781 This returns "(# of incoming, # of outgoing)" tuple.
786 782 '''
787 783 with peer.commandexecutor() as e:
788 784 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
789 785 'namespace': 'bookmarks',
790 786 }).result())
791 787
792 788 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
793 789 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
794 790 return (len(addsrc), len(adddst))
795 791
796 792 def validdest(repo, old, new):
797 793 """Is the new bookmark destination a valid update from the old one"""
798 794 repo = repo.unfiltered()
799 795 if old == new:
800 796 # Old == new -> nothing to update.
801 797 return False
802 798 elif not old:
803 799 # old is nullrev, anything is valid.
804 800 # (new != nullrev has been excluded by the previous check)
805 801 return True
806 802 elif repo.obsstore:
807 803 return new.node() in obsutil.foreground(repo, [old.node()])
808 804 else:
809 805 # still an independent clause as it is lazier (and therefore faster)
810 806 return old.isancestorof(new)
811 807
def checkformat(repo, mark):
    """return a valid version of a potential bookmark name

    Raises an abort error if the bookmark name is not valid.
    """
    stripped = mark.strip()
    if not stripped:
        # An all-whitespace name would be invisible in every listing.
        raise error.Abort(_("bookmark names cannot consist entirely of "
                            "whitespace"))
    # Reject names that collide with other label namespaces (tags,
    # branches) or contain forbidden characters.
    scmutil.checknewlabel(repo, stripped, 'bookmark')
    return stripped
823 819
def delete(repo, tr, names):
    """remove a mark from the bookmark store

    Raises an abort error if mark does not exist.
    """
    marks = repo._bookmarks
    removals = []
    for name in names:
        if name not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % name)
        if name == repo._activebookmark:
            # Deleting the active bookmark implicitly deactivates it.
            deactivate(repo)
        # A (name, None) pair asks the store to drop the bookmark.
        removals.append((name, None))
    marks.applychanges(repo, tr, removals)
838 834
def rename(repo, tr, old, new, force=False, inactive=False):
    """rename a bookmark from old to new

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate the new bookmark.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    mark = checkformat(repo, new)
    if old not in marks:
        raise error.Abort(_("bookmark '%s' does not exist") % old)
    # First drop any bookmark displaced by the new name, then bind the old
    # name's node to the new name and remove the old name.
    changes = [(bm, None) for bm in marks.checkconflict(mark, force)]
    changes.append((mark, marks[old]))
    changes.append((old, None))
    marks.applychanges(repo, tr, changes)
    # Renaming the active bookmark keeps it active under its new name,
    # unless the caller asked for it to stay inactive.
    if repo._activebookmark == old and not inactive:
        activate(repo, mark)
860 856
def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
    """add a list of bookmarks

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate any bookmark. Otherwise, the
    first bookmark is activated.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    cur = repo['.'].node()
    newact = None
    changes = []
    hiddenrev = None

    # unhide revs if any
    if rev:
        # NOTE: repo is rebound to an unhidden view here; everything below
        # (including checkformat/revsingle) operates on that view.
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')

    for mark in names:
        mark = checkformat(repo, mark)
        # The first name in the list is the activation candidate.
        if newact is None:
            newact = mark
        if inactive and mark == repo._activebookmark:
            # --inactive on the currently-active bookmark just deactivates
            # it and stops: nothing is written to the store.
            deactivate(repo)
            return
        tgt = cur
        if rev:
            ctx = scmutil.revsingle(repo, rev)
            if ctx.hidden():
                # Remember a short hash so a warning can be issued below.
                hiddenrev = ctx.hex()[:12]
            tgt = ctx.node()
        # Any bookmark shadowed by this name (divergent variants etc.)
        # is removed before the new binding is recorded.
        for bm in marks.checkconflict(mark, force, tgt):
            changes.append((bm, None))
        changes.append((mark, tgt))

    if hiddenrev:
        repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)

        # NOTE(review): `ctx` is the one left over from the last loop
        # iteration; it is only bound when `rev` was given, which is also
        # the only way hiddenrev gets set — confirm if refactoring.
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
            repo.ui.warn("(%s)\n" % msg)

    marks.applychanges(repo, tr, changes)
    # Activate the first bookmark when it lands on the working parent and
    # no explicit rev was given; otherwise deactivate if the active
    # bookmark was just moved away from the working parent.
    # NOTE(review): `tgt`/`newact` assume `names` is non-empty — presumably
    # guaranteed by callers; verify before reuse.
    if not inactive and cur == marks[newact] and not rev:
        activate(repo, newact)
    elif cur != tgt and newact == repo._activebookmark:
        deactivate(repo)
911 907
def _printbookmarks(ui, repo, fm, bmarks):
    """private method to print bookmarks

    Provides a way for extensions to control how bookmarks are printed (e.g.
    prepend or postpend names)
    """
    hexfn = fm.hexfunc
    if not bmarks and fm.isplain():
        ui.status(_("no bookmarks set\n"))
    for bmark, (node, prefix, label) in sorted(bmarks.iteritems()):
        fm.startitem()
        fm.context(repo=repo)
        if not ui.quiet:
            fm.plain(' %s ' % prefix, label=label)
        fm.write('bookmark', '%s', bmark, label=label)
        # Pad so the "rev:node" column starts at display column 25.
        padding = " " * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, 'rev node', padding + ' %d:%s',
                     repo.changelog.rev(node), hexfn(node), label=label)
        fm.data(active=(activebookmarklabel in label))
        fm.plain('\n')
932 928
def printbookmarks(ui, repo, fm, names=None):
    """print bookmarks by the given formatter

    Provides a way for extensions to control how bookmarks are printed.
    """
    marks = repo._bookmarks
    bmarks = {}
    # With no explicit names, list every bookmark in the store.
    for bmark in (names or marks):
        if bmark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % bmark)
        isactive = (bmark == repo._activebookmark)
        prefix = '*' if isactive else ' '
        label = activebookmarklabel if isactive else ''
        bmarks[bmark] = (marks[bmark], prefix, label)
    _printbookmarks(ui, repo, fm, bmarks)
951 947
def preparehookargs(name, old, new):
    """Build the argument mapping for a bookmark hook invocation.

    A missing node (bookmark being created or deleted) is represented by
    the empty string before hex-encoding.
    """
    newnode = '' if new is None else new
    oldnode = '' if old is None else old
    return {'bookmark': name,
            'node': hex(newnode),
            'oldnode': hex(oldnode)}
@@ -1,538 +1,538 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 scmutil,
25 25 setdiscovery,
26 26 treediscovery,
27 27 util,
28 28 )
29 29
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restrict the discovery to a subset defined by
    these nodes. Changeset outside of this set won't be considered (and
    won't appears in "common")

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        # Old server without getbundle support: use the legacy
        # tree-walking discovery protocol.
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
        if all(knownnode(h) for h in heads):
            # Every requested head already exists locally, so nothing can
            # be incoming and the requested heads are the answer.
            return (heads, False, heads)

    common, anyinc, srvheads = setdiscovery.findcommonheads(
        repo.ui, repo, remote,
        abortwhenunrelated=not force,
        ancestorsof=ancestorsof)
    return (list(common), anyinc, heads or list(srvheads))
65 65
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changeset that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # Derive commonheads from the given roots: the common side is
            # every parent of a root that is not itself in the missing set.
            discbases = []
            for n in missingroots:
                discbases.extend([p for p in cl.parents(n) if p != nullid])
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        # _common/_missing back the lazily-computed `common`/`missing`
        # properties below; None means "not computed yet".
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # Fill both caches in one changelog query; computing either
        # property therefore populates the other as well.
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        # Lazily computed full common set (not just its heads).
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        # Lazily computed full missing set (not just its heads).
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
124 124
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force,
                                       ancestorsof=onlyheads)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    # Changesets may need to be excluded when the repo has secret-phase
    # roots or obsolescence markers (extinct changesets).
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        # Nothing to exclude: outgoing is simply everything up to the
        # requested (or all local) heads.
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            # Nothing was excluded, the requested heads stand.
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
185 185
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand('branchmap', {}).result()

    knownnode = cl.hasnode # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.iteritems():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        # Split remote heads into those we know (known) and those we have
        # never seen locally (unsynced).
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache((branch, heads[1])
                                 for branch, heads in headssum.iteritems()
                                 if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        # Rewrite the "after push" head list in place (slot 1).
        headssum[branch][1][:] = newheads
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
        # Append the (initially empty) discardedheads slot.
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = set(torev(h) for h in outgoing.missingheads)
        futureheads |= set(torev(h) for h in outgoing.commonheads)
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(headssum.iteritems()):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            # Drop heads that obsolescence markers in the push make obsolete.
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                sorted(result[1]))
    return headssum
261 261
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # Old servers expose only topological heads, so the whole summary
    # lives under the single pseudo-branch `None`.
    # (code based on update)
    knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # Every node in outgoing.missing descends from an old head, another
    # missing node, or nullrev, so the post-push heads are simply the
    # heads of (old heads + missing).
    revs = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(ctx.node() for ctx in revs)
    # A placeholder unsynced head triggers the "unsynced changes" warning.
    unsynced = [None] if inc else []
    return {None: (oldheads, newheads, unsynced, [])}
283 283
def _nowarnheads(pushop):
    """Return the set of head nodes exempt from "new head" warnings.

    Heads carrying a bookmark that is being pushed (or that validly
    advances a remote bookmark) are expected new heads, so push should
    not warn about them.
    """
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            # bmstore.changectx() was removed from the bmstore API; index
            # the repo with the bookmark's node instead.
            lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(localbookmarks[bm])

    return bookmarkedheads
312 312
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    # A branch is new when its remoteheads slot is None.
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = (closedbranches & set(newbranches))
        if closedbranches:
            errmsg = (_("push creates new remote branches: %s (%d closed)!")
                      % (branchnames, len(closedbranches)))
        else:
            errmsg = (_("push creates new remote branches: %s!")% branchnames)
        hint=_("use 'hg push --new-branch' to create new remote branches")
        raise error.Abort(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            # New branch: more than one head is only allowed via the
            # multiple-heads error below.
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            # Only the first offending branch sets the error message; the
            # rest are still listed via ui.note below.
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
433 433
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    torev = unfi.changelog.nodemap.get
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    ispushed = (lambda n: torev(n) in futurecommon)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set() # final set of new heads
    discarded = set() # new head of fully replaced branch

    localcandidate = set() # candidate heads known locally
    unknownheads = set() # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if (torev(nh) in futurecommon or ispublic(torev(nh))):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(torev(n) in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    # Heads unknown locally cannot be post-processed; pass them through.
    newhs |= unknownheads
    return newhs, discarded
509 509
def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look in to the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: There are cache opportunity in this function. but it would requires
    a two dimensional stack.)
    """
    successorsmarkers = obsstore.successors
    # Depth-first walk of the obsolescence graph starting at `node`,
    # following successors (or parents for prune markers) until a pushed
    # changeset is reached.
    pending = [node]
    seen = set(pending)
    while pending:
        current = pending.pop()
        if ispushed(current):
            return True
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in successorsmarkers.get(current, ()):
            targets = marker[1] # successors
            if not targets:
                # prune marker: follow the recorded parents instead
                targets = marker[5] or ()
            for succ in targets:
                if succ not in seen:
                    seen.add(succ)
                    pending.append(succ)
    return False
General Comments 0
You need to be logged in to leave comments. Login now