##// END OF EJS Templates
pull: perform bookmark updates in the transaction
Pierre-Yves David -
r22666:0f8120c1 default
parent child Browse files
Show More
@@ -1,435 +1,436
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex, bin
10 10 from mercurial import encoding, error, util, obsolete
11 11 import errno
12 12
class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark reads and writes, so that it's
    fairly simple to replace the storage underlying bookmarks without
    having to clone the logic surrounding bookmarks.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.

    This class does NOT handle the "current" bookmark state at this
    time.
    """

    def __init__(self, repo):
        dict.__init__(self)
        self._repo = repo
        try:
            for line in repo.vfs('bookmarks'):
                line = line.strip()
                # skip blank lines
                if not line:
                    continue
                # each valid line is "<hex node> <bookmark name>"
                if ' ' not in line:
                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                 % line)
                    continue
                sha, refspec = line.split(' ', 1)
                refspec = encoding.tolocal(refspec)
                try:
                    self[refspec] = repo.changelog.lookup(sha)
                except LookupError:
                    # bookmarks referring to unknown changesets are
                    # silently dropped (e.g. after a strip)
                    pass
        except IOError, inst:
            # a missing bookmarks file just means "no bookmarks"
            if inst.errno != errno.ENOENT:
                raise

    def recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            vfs=self._repo.vfs)

    def write(self):
        '''Write bookmarks

        Write the given bookmark => hash dictionary to the .hg/bookmarks file
        in a format equal to those of localtags.

        We also store a backup of the previous state in undo.bookmarks that
        can be copied back on rollback.
        '''
        repo = self._repo
        # drop the "current" state if its bookmark no longer exists
        if repo._bookmarkcurrent not in self:
            unsetcurrent(repo)

        wlock = repo.wlock()
        try:

            file = repo.vfs('bookmarks', 'w', atomictemp=True)
            self._write(file)
            file.close()

            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
            try:
                repo.svfs.utime('00changelog.i', None)
            except OSError:
                pass

        finally:
            wlock.release()

    def _write(self, fp):
        # serialize as "<hex node> <name>\n" per bookmark (localtags format)
        for name, node in self.iteritems():
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
89 89
90 90 def readcurrent(repo):
91 91 '''Get the current bookmark
92 92
93 93 If we use gittish branches we have a current bookmark that
94 94 we are on. This function returns the name of the bookmark. It
95 95 is stored in .hg/bookmarks.current
96 96 '''
97 97 mark = None
98 98 try:
99 99 file = repo.opener('bookmarks.current')
100 100 except IOError, inst:
101 101 if inst.errno != errno.ENOENT:
102 102 raise
103 103 return None
104 104 try:
105 105 # No readline() in osutil.posixfile, reading everything is cheap
106 106 mark = encoding.tolocal((file.readlines() or [''])[0])
107 107 if mark == '' or mark not in repo._bookmarks:
108 108 mark = None
109 109 finally:
110 110 file.close()
111 111 return mark
112 112
def setcurrent(repo, mark):
    '''Record *mark* as the active bookmark (hg update <bookmark>).

    The name is persisted in .hg/bookmarks.current.  Raises
    AssertionError when the bookmark does not exist; does nothing when
    it is already the active one.
    '''
    if mark not in repo._bookmarks:
        raise AssertionError('bookmark %s does not exist!' % mark)

    if repo._bookmarkcurrent == mark:
        # already active, nothing to record
        return

    wlock = repo.wlock()
    try:
        fp = repo.opener('bookmarks.current', 'w', atomictemp=True)
        fp.write(encoding.fromlocal(mark))
        fp.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
134 134
135 135 def unsetcurrent(repo):
136 136 wlock = repo.wlock()
137 137 try:
138 138 try:
139 139 repo.vfs.unlink('bookmarks.current')
140 140 repo._bookmarkcurrent = None
141 141 except OSError, inst:
142 142 if inst.errno != errno.ENOENT:
143 143 raise
144 144 finally:
145 145 wlock.release()
146 146
def iscurrent(repo, mark=None, parents=None):
    '''Tell whether the current bookmark is also active

    I.e., the bookmark listed in .hg/bookmarks.current also points to a
    parent of the working directory.
    '''
    if not mark:
        mark = repo._bookmarkcurrent
    if not parents:
        parents = [ctx.node() for ctx in repo[None].parents()]
    marks = repo._bookmarks
    if mark not in marks:
        return False
    return marks[mark] in parents
159 159
def updatecurrentbookmark(repo, oldnode, curbranch):
    """Move the active bookmark from oldnode to the tip of curbranch.

    When the 'default' branch does not exist (empty repository), fall
    back to the repository-wide "tip"; abort for any other unknown
    branch name.
    """
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch != "default":
            raise util.Abort(_("branch %s not found") % curbranch)
        # no default branch!
        return update(repo, oldnode, repo.lookup("tip"))
168 168
def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    marks = repo._bookmarks
    stem = bm.split('@', 1)[0]
    deleted = False
    # snapshot the matching names first: we mutate marks while looping
    for mark in [b for b in marks if b.split('@', 1)[0] == stem]:
        # a divergent name is "<stem>@<suffix>"; a plain "<stem>" or the
        # literal "@" bookmark can't be divergent by definition
        if mark == '@' or '@' not in mark:
            continue
        if mark and mark != bm and marks[mark] in deletefrom:
            del marks[mark]
            deleted = True
    return deleted
185 185
def calculateupdate(ui, repo, checkout):
    '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
    check out and where to move the active bookmark from, if needed.'''
    if checkout is not None:
        # explicit target requested: never move a bookmark
        return (checkout, None)
    movemarkfrom = None
    curmark = repo._bookmarkcurrent
    if iscurrent(repo):
        # the active bookmark sits on a working-dir parent: drag it along
        movemarkfrom = repo['.'].node()
    elif curmark:
        ui.status(_("updating to active bookmark %s\n") % curmark)
        checkout = curmark
    return (checkout, movemarkfrom)
198 198
def update(repo, parents, node):
    """Advance the active bookmark towards *node*.

    If the active bookmark points to one of *parents*, try to move it to
    *node*; divergent variants of it that the new location subsumes are
    deleted.  Returns True when the bookmark store was modified (and
    written out), False otherwise.
    """
    cur = repo._bookmarkcurrent
    if not cur:
        return False

    marks = repo._bookmarks
    changed = False
    deletefrom = parents
    if marks[cur] in parents:
        new = repo[node]
        stem = cur.split('@', 1)[0]
        divs = [repo[b] for b in marks if b.split('@', 1)[0] == stem]
        anc = repo.changelog.ancestors([new.rev()])
        # divergent variants contained in (or equal to) the destination
        # are the ones safe to delete
        deletefrom = [d.node() for d in divs if d.rev() in anc or d == new]
        if validdest(repo, repo[marks[cur]], new):
            marks[cur] = new.node()
            changed = True

    if deletedivergent(repo, deletefrom, cur):
        changed = True

    if changed:
        marks.write()
    return changed
223 223
def listbookmarks(repo):
    """Return {name: hex node} for bookmarks safe to expose over the wire.

    Bookmarks pointing at unknown changesets and local divergent
    bookmarks ("name@suffix") are filtered out.
    """
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    result = {}
    for name, node in marks.iteritems():
        if not hasnode(node):
            continue
        # don't expose local divergent bookmarks ("name@" is allowed)
        if '@' in name and not name.endswith('@'):
            continue
        result[name] = hex(node)
    return result
236 236
def pushbookmark(repo, key, old, new):
    """pushkey handler: move bookmark *key* from hex node *old* to *new*.

    An empty *new* deletes the bookmark.  Returns False when the current
    value matches neither *old* nor *new* (lost race) or when *new* is
    not known locally; True on success.
    """
    wlock = repo.wlock()
    try:
        marks = repo._bookmarks
        existing = hex(marks.get(key, ''))
        if existing not in (old, new):
            # somebody moved the bookmark since the client last looked
            return False
        if new == '':
            del marks[key]
        elif new in repo:
            marks[key] = repo[new].node()
        else:
            return False
        marks.write()
        return True
    finally:
        wlock.release()
254 254
def compare(repo, srcmarks, dstmarks,
            srchex=None, dsthex=None, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid)", each are list of bookmarks below:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but changeset referred on src is unknown on dst
    :invalid: unknown on both side

    Each elements of lists in result tuple is tuple "(bookmark name,
    changeset ID on source side, changeset ID on destination
    side)". Each changeset IDs are 40 hexadecimal digit string or
    None.

    Changeset IDs of tuples in "addsrc", "adddst", "differ" or
    "invalid" list may be unknown for repo.

    This function expects that "srcmarks" and "dstmarks" return
    changeset ID in 40 hexadecimal digit string for specified
    bookmark. If not so (e.g. bmstore "repo._bookmarks" returning
    binary value), "srchex" or "dsthex" should be specified to convert
    into such form.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    '''
    # default converters: values are assumed to already be 40-digit hex
    if not srchex:
        srchex = lambda x: x
    if not dsthex:
        dsthex = lambda x: x

    if targets:
        bset = set(targets)
    else:
        # candidates: names present on exactly one side, plus names
        # present on both sides whose values differ
        srcmarkset = set(srcmarks)
        dstmarkset = set(dstmarks)
        bset = srcmarkset ^ dstmarkset
        for b in srcmarkset & dstmarkset:
            if srchex(srcmarks[b]) != dsthex(dstmarks[b]):
                bset.add(b)

    # one append-bound list per category, in documented result order
    results = ([], [], [], [], [], [], [])
    addsrc = results[0].append
    adddst = results[1].append
    advsrc = results[2].append
    advdst = results[3].append
    diverge = results[4].append
    differ = results[5].append
    invalid = results[6].append

    for b in sorted(bset):
        if b not in srcmarks:
            if b in dstmarks:
                adddst((b, None, dsthex(dstmarks[b])))
            else:
                invalid((b, None, None))
        elif b not in dstmarks:
            addsrc((b, srchex(srcmarks[b]), None))
        else:
            scid = srchex(srcmarks[b])
            dcid = dsthex(dstmarks[b])
            if scid in repo and dcid in repo:
                # both ends known locally: classify by ancestry.
                # The rev() comparison only picks which direction to
                # test first; validdest() decides advance vs divergence.
                sctx = repo[scid]
                dctx = repo[dcid]
                if sctx.rev() < dctx.rev():
                    if validdest(repo, sctx, dctx):
                        advdst((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
                else:
                    if validdest(repo, dctx, sctx):
                        advsrc((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ((b, scid, dcid))

    return results
340 340
341 341 def _diverge(ui, b, path, localmarks):
342 342 if b == '@':
343 343 b = ''
344 344 # find a unique @ suffix
345 345 for x in range(1, 100):
346 346 n = '%s@%d' % (b, x)
347 347 if n not in localmarks:
348 348 break
349 349 # try to use an @pathalias suffix
350 350 # if an @pathalias already exists, we overwrite (update) it
351 351 if path.startswith("file:"):
352 352 path = util.url(path).path
353 353 for p, u in ui.configitems("paths"):
354 354 if u.startswith("file:"):
355 355 u = util.url(u).path
356 356 if path == u:
357 357 n = '%s@%s' % (b, p)
358 358 return n
359 359
def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    """Merge remote bookmarks *remotemarks* into the local repository.

    ``trfunc`` is a callable returning the transaction that bookmark
    updates must be recorded in; it is only invoked when at least one
    local bookmark actually changes.  ``explicit`` lists bookmark names
    the user requested explicitly: those are imported even when they
    diverge from, or overwrite, the local version.
    """
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = compare(repo, remotemarks, localmarks, dsthex=hex)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, bin(scid), status,
                            _("adding remote bookmark %s\n") % (b)))
    for b, scid, dcid in advsrc:
        changed.append((b, bin(scid), status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            # BUG FIX: the format string has a single %s, so it must be
            # applied to the bare name, not a (b, b) tuple (TypeError)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % b))
        else:
            db = _diverge(ui, b, path, localmarks)
            changed.append((db, bin(scid), warn,
                            _("divergent bookmark %s stored as %s\n")
                            % (b, db)))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % b))

    if changed:
        # open/fetch the transaction lazily: only pay for it when a
        # bookmark actually moves
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)
404 405
def diff(ui, dst, src):
    """Print bookmarks present in *src* but missing from *dst*.

    Returns 0 when at least one such bookmark was listed, 1 otherwise
    (the usual command exit-code convention).
    """
    ui.status(_("searching for changed bookmarks\n"))

    smarks = src.listkeys('bookmarks')
    dmarks = dst.listkeys('bookmarks')

    missing = sorted(set(smarks) - set(dmarks))
    for name in missing:
        if ui.debugflag:
            node = smarks[name]
        else:
            # abbreviate to 12 hex digits unless --debug is in effect
            node = smarks[name][:12]
        ui.write(" %-25s %s\n" % (name, node))

    if not missing:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
420 421
def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # moving a bookmark onto itself is a no-op, not an update
        return False
    if not old:
        # old is nullrev, anything is valid
        # (new != nullrev was excluded by the equality check above)
        return True
    if repo.obsstore:
        # with obsolescence enabled, the foreground of old (descendants
        # and successors) are acceptable destinations
        return new.node() in obsolete.foreground(repo, [old.node()])
    # kept as a separate clause as it is lazier (and therefore faster)
    return old.descendant(new)
@@ -1,1209 +1,1210
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler for the bundle stream read from *fh*.

    Handles headerless (raw changegroup) streams as well as the HG10
    and HG2X formats; aborts on anything else.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # a raw, headerless changegroup stream starts with a NUL byte;
        # wrap it so that it parses as an uncompressed HG10 bundle
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # the compression marker follows the header unless already known
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('B2X:OBSMARKERS', data=stream)
54 54
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
152 152
# mapping of messages used when pushing a bookmark: each action maps to
# a (success message, failure message) pair, both taking the bookmark
# name as their single % argument
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
161 161
162 162
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    # a local destination must support everything this repo requires
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        # without unbundle support, we must hold the remote repo's lock
        # for the whole push sequence
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # each step below checks pushop.stepsdone and is a no-op when
            # bundle2 already performed it
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop
230 230
# ordered list of discovery step names to perform before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (steps are registered through the pushdiscovery() decorator below)
pushdiscoverymapping = {}
238 238
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is stored in the step -> function mapping and
    *stepname* is appended to the ordered step list, so registration
    order is execution order (this may matter).

    Only use this decorator for a brand-new step; to wrap a step from an
    extension, change the pushdiscoverymapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
254 254
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
260 260
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # reuse the incoming discovery result to avoid a second round-trip
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
274 274
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing server, only public changesets need a phase
    # update; a publishing server turns everything public anyway
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset breaks if droots is not strictly
    # XXX root; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
309 309
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    if not obsolete._enabled:
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
320 320
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compute which bookmark updates to push, recorded on pushop

    Each pushop.outbookmarks entry is (name, old remote hex, new hex);
    an empty old value means "add", an empty new value means "delete".
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark movement to the subset being pushed
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp

    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks added locally
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        pushop.outbookmarks.append((b, '', scid))
    # overwritten bookmarks are only pushed when explicitly requested
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks only present remotely: an explicit request deletes them
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
367 367
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before push.

    Returns False when there is nothing to push; aborts when the push
    would publish troubled (obsolete/unstable/bumped/divergent)
    changesets or create unwanted new heads; returns True otherwise.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the
            # missingheads will be obsolete or unstable. So
            # checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
402 402
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (steps are registered through the b2partsgenerator() decorator below)
b2partsgenmapping = {}
410 410
def b2partsgenerator(stepname):
    """decorator registering a bundle2 part-generation step

    The decorated function is stored in the step -> function mapping and
    *stepname* is appended to the ordered step list; registration order
    is generation order (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
426 426
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    # the reply handler closes over cgpart so it can locate the matching
    # reply part once the server answers
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
452 452
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Emits one b2x:pushkey part per outdated remote head to turn it public.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        # remote cannot handle pushkey parts; the fallback path will run
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """report pushkey results for each phase-update part"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
484 484
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the outgoing bundle2

    Skipped when the remote advertises no obsmarker version we share."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
495 495
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> creation, empty new -> deletion; used for messages
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark pushkey results from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
538 538
539 539
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator in declaration order
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # let each part generator consume its piece of the server reply
    for rephand in replyhandlers:
        rephand(op)
569 569
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
618 618
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independant pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
710 710
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        tr = pushop.repo.transaction('push-phase-sync')
        try:
            phases.advanceboundary(pushop.repo, tr, phase, nodes)
            tr.close()
        finally:
            # release is a no-op after a successful close
            tr.release()
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
729 729
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # any falsy pushkey return means that chunk was rejected
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
748 748
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty old -> creation, empty new -> deletion; used for messages
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
770 770
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers',
                              'bookmarks'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
842 842
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and phases, bookmarks, obsmarkers) from ``remote``

    Returns the changegroup result code (``pullop.cgresult``)."""
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
                and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # each step is a no-op when bundle2 already handled it (todosteps)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        # release is a no-op after a successful close
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
870 870
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually drive all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
881 881
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete._enabled:
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.todosteps.remove('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        # gettransaction is passed so parts are applied within the pull
        # transaction (created lazily on first use)
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
930 930
931 931 def _pullbundle2extraprepare(pullop, kwargs):
932 932 """hook function so that extensions can extend the getbundle call"""
933 933 pass
934 934
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' not in pullop.todosteps:
        return
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
968 968
def _pullphase(pullop):
    """fetch remote phase data and apply it locally (pushkey fallback)"""
    if 'phases' in pullop.todosteps:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
975 975
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1008 1008
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' not in pullop.todosteps:
        return
    pullop.todosteps.remove('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    # gettransaction is passed so the bookmark update is recorded in the
    # pull transaction rather than written immediately
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1019 1020
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction may have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' not in pullop.todosteps:
        return
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                # reverse sort to ensure we end with dump0
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1043 1044
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2X', 'bundle2=' + urllib.quote(blob)])
1050 1051
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated by the @getbundle2partsgenerator decorator below.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1058 1059
def getbundle2partsgenerator(stepname):
    """decorator registering a getbundle bundle2-part generator under ``stepname``

    The decorated function is recorded in the step -> function mapping and
    its step name appended to the ordered step list, so decoration order
    matters.

    Only use this decorator for new steps; to wrap a step added by another
    extension, modify the getbundle2partsmapping dictionary directly."""
    def register(func):
        # refuse to silently replace an already-registered step
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return register
1074 1075
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # run every registered part generator in declaration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        kwargs['heads'] = heads
        kwargs['common'] = common
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1117 1118
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        cg = changegroup.getchangegroup(repo, source, heads=heads,
                                        common=common, bundlecaps=bundlecaps)

    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
1130 1131
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add one b2x:listkeys part per requested pushkey namespace"""
    for ns in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', ns)
        part.data = pushkey.encodekeys(repo.listkeys(ns).items())
1141 1142
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only markers relevant to ancestors of the requested heads
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        buildobsmarkerspart(bundler, markers)
1152 1153
@getbundle2partsgenerator('extra')
def _getbundleextrapart(bundler, repo, source, bundlecaps=None,
                        b2caps=None, **kwargs):
    """hook letting extensions append extra parts to the requested bundle

    Does nothing by default; extensions wrap it to add their own parts."""
    return None
1158 1159
def check_heads(repo, their_heads, context):
    """raise PushRaced if the repo heads no longer match ``their_heads``

    Used by peer for unbundling.
    """
    current = repo.heads()
    current_hash = util.sha1(''.join(sorted(current))).digest()
    accepted = (their_heads == ['force']
                or their_heads == current
                or their_heads == ['hashed', current_hash])
    if not accepted:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1172 1173
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: process within a single transaction so hooks
            # run against pending data before it is committed
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # tag the exception so callers know bundle2 was in flight
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # release is a no-op after a successful close
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now