##// END OF EJS Templates
pull: perform bookmark updates in the transaction
Pierre-Yves David -
r22666:0f8120c1 default
parent child Browse files
Show More
@@ -1,435 +1,436 b''
1 # Mercurial bookmark support code
1 # Mercurial bookmark support code
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.node import hex, bin
9 from mercurial.node import hex, bin
10 from mercurial import encoding, error, util, obsolete
10 from mercurial import encoding, error, util, obsolete
11 import errno
11 import errno
12
12
13 class bmstore(dict):
13 class bmstore(dict):
14 """Storage for bookmarks.
14 """Storage for bookmarks.
15
15
16 This object should do all bookmark reads and writes, so that it's
16 This object should do all bookmark reads and writes, so that it's
17 fairly simple to replace the storage underlying bookmarks without
17 fairly simple to replace the storage underlying bookmarks without
18 having to clone the logic surrounding bookmarks.
18 having to clone the logic surrounding bookmarks.
19
19
20 This particular bmstore implementation stores bookmarks as
20 This particular bmstore implementation stores bookmarks as
21 {hash}\s{name}\n (the same format as localtags) in
21 {hash}\s{name}\n (the same format as localtags) in
22 .hg/bookmarks. The mapping is stored as {name: nodeid}.
22 .hg/bookmarks. The mapping is stored as {name: nodeid}.
23
23
24 This class does NOT handle the "current" bookmark state at this
24 This class does NOT handle the "current" bookmark state at this
25 time.
25 time.
26 """
26 """
27
27
28 def __init__(self, repo):
28 def __init__(self, repo):
29 dict.__init__(self)
29 dict.__init__(self)
30 self._repo = repo
30 self._repo = repo
31 try:
31 try:
32 for line in repo.vfs('bookmarks'):
32 for line in repo.vfs('bookmarks'):
33 line = line.strip()
33 line = line.strip()
34 if not line:
34 if not line:
35 continue
35 continue
36 if ' ' not in line:
36 if ' ' not in line:
37 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
37 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
38 % line)
38 % line)
39 continue
39 continue
40 sha, refspec = line.split(' ', 1)
40 sha, refspec = line.split(' ', 1)
41 refspec = encoding.tolocal(refspec)
41 refspec = encoding.tolocal(refspec)
42 try:
42 try:
43 self[refspec] = repo.changelog.lookup(sha)
43 self[refspec] = repo.changelog.lookup(sha)
44 except LookupError:
44 except LookupError:
45 pass
45 pass
46 except IOError, inst:
46 except IOError, inst:
47 if inst.errno != errno.ENOENT:
47 if inst.errno != errno.ENOENT:
48 raise
48 raise
49
49
50 def recordchange(self, tr):
50 def recordchange(self, tr):
51 """record that bookmarks have been changed in a transaction
51 """record that bookmarks have been changed in a transaction
52
52
53 The transaction is then responsible for updating the file content."""
53 The transaction is then responsible for updating the file content."""
54 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
54 tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
55 vfs=self._repo.vfs)
55 vfs=self._repo.vfs)
56
56
57 def write(self):
57 def write(self):
58 '''Write bookmarks
58 '''Write bookmarks
59
59
60 Write the given bookmark => hash dictionary to the .hg/bookmarks file
60 Write the given bookmark => hash dictionary to the .hg/bookmarks file
61 in a format equal to those of localtags.
61 in a format equal to those of localtags.
62
62
63 We also store a backup of the previous state in undo.bookmarks that
63 We also store a backup of the previous state in undo.bookmarks that
64 can be copied back on rollback.
64 can be copied back on rollback.
65 '''
65 '''
66 repo = self._repo
66 repo = self._repo
67 if repo._bookmarkcurrent not in self:
67 if repo._bookmarkcurrent not in self:
68 unsetcurrent(repo)
68 unsetcurrent(repo)
69
69
70 wlock = repo.wlock()
70 wlock = repo.wlock()
71 try:
71 try:
72
72
73 file = repo.vfs('bookmarks', 'w', atomictemp=True)
73 file = repo.vfs('bookmarks', 'w', atomictemp=True)
74 self._write(file)
74 self._write(file)
75 file.close()
75 file.close()
76
76
77 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
77 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
78 try:
78 try:
79 repo.svfs.utime('00changelog.i', None)
79 repo.svfs.utime('00changelog.i', None)
80 except OSError:
80 except OSError:
81 pass
81 pass
82
82
83 finally:
83 finally:
84 wlock.release()
84 wlock.release()
85
85
86 def _write(self, fp):
86 def _write(self, fp):
87 for name, node in self.iteritems():
87 for name, node in self.iteritems():
88 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
88 fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
89
89
90 def readcurrent(repo):
90 def readcurrent(repo):
91 '''Get the current bookmark
91 '''Get the current bookmark
92
92
93 If we use gittish branches we have a current bookmark that
93 If we use gittish branches we have a current bookmark that
94 we are on. This function returns the name of the bookmark. It
94 we are on. This function returns the name of the bookmark. It
95 is stored in .hg/bookmarks.current
95 is stored in .hg/bookmarks.current
96 '''
96 '''
97 mark = None
97 mark = None
98 try:
98 try:
99 file = repo.opener('bookmarks.current')
99 file = repo.opener('bookmarks.current')
100 except IOError, inst:
100 except IOError, inst:
101 if inst.errno != errno.ENOENT:
101 if inst.errno != errno.ENOENT:
102 raise
102 raise
103 return None
103 return None
104 try:
104 try:
105 # No readline() in osutil.posixfile, reading everything is cheap
105 # No readline() in osutil.posixfile, reading everything is cheap
106 mark = encoding.tolocal((file.readlines() or [''])[0])
106 mark = encoding.tolocal((file.readlines() or [''])[0])
107 if mark == '' or mark not in repo._bookmarks:
107 if mark == '' or mark not in repo._bookmarks:
108 mark = None
108 mark = None
109 finally:
109 finally:
110 file.close()
110 file.close()
111 return mark
111 return mark
112
112
113 def setcurrent(repo, mark):
113 def setcurrent(repo, mark):
114 '''Set the name of the bookmark that we are currently on
114 '''Set the name of the bookmark that we are currently on
115
115
116 Set the name of the bookmark that we are on (hg update <bookmark>).
116 Set the name of the bookmark that we are on (hg update <bookmark>).
117 The name is recorded in .hg/bookmarks.current
117 The name is recorded in .hg/bookmarks.current
118 '''
118 '''
119 if mark not in repo._bookmarks:
119 if mark not in repo._bookmarks:
120 raise AssertionError('bookmark %s does not exist!' % mark)
120 raise AssertionError('bookmark %s does not exist!' % mark)
121
121
122 current = repo._bookmarkcurrent
122 current = repo._bookmarkcurrent
123 if current == mark:
123 if current == mark:
124 return
124 return
125
125
126 wlock = repo.wlock()
126 wlock = repo.wlock()
127 try:
127 try:
128 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
128 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
129 file.write(encoding.fromlocal(mark))
129 file.write(encoding.fromlocal(mark))
130 file.close()
130 file.close()
131 finally:
131 finally:
132 wlock.release()
132 wlock.release()
133 repo._bookmarkcurrent = mark
133 repo._bookmarkcurrent = mark
134
134
135 def unsetcurrent(repo):
135 def unsetcurrent(repo):
136 wlock = repo.wlock()
136 wlock = repo.wlock()
137 try:
137 try:
138 try:
138 try:
139 repo.vfs.unlink('bookmarks.current')
139 repo.vfs.unlink('bookmarks.current')
140 repo._bookmarkcurrent = None
140 repo._bookmarkcurrent = None
141 except OSError, inst:
141 except OSError, inst:
142 if inst.errno != errno.ENOENT:
142 if inst.errno != errno.ENOENT:
143 raise
143 raise
144 finally:
144 finally:
145 wlock.release()
145 wlock.release()
146
146
147 def iscurrent(repo, mark=None, parents=None):
147 def iscurrent(repo, mark=None, parents=None):
148 '''Tell whether the current bookmark is also active
148 '''Tell whether the current bookmark is also active
149
149
150 I.e., the bookmark listed in .hg/bookmarks.current also points to a
150 I.e., the bookmark listed in .hg/bookmarks.current also points to a
151 parent of the working directory.
151 parent of the working directory.
152 '''
152 '''
153 if not mark:
153 if not mark:
154 mark = repo._bookmarkcurrent
154 mark = repo._bookmarkcurrent
155 if not parents:
155 if not parents:
156 parents = [p.node() for p in repo[None].parents()]
156 parents = [p.node() for p in repo[None].parents()]
157 marks = repo._bookmarks
157 marks = repo._bookmarks
158 return (mark in marks and marks[mark] in parents)
158 return (mark in marks and marks[mark] in parents)
159
159
160 def updatecurrentbookmark(repo, oldnode, curbranch):
160 def updatecurrentbookmark(repo, oldnode, curbranch):
161 try:
161 try:
162 return update(repo, oldnode, repo.branchtip(curbranch))
162 return update(repo, oldnode, repo.branchtip(curbranch))
163 except error.RepoLookupError:
163 except error.RepoLookupError:
164 if curbranch == "default": # no default branch!
164 if curbranch == "default": # no default branch!
165 return update(repo, oldnode, repo.lookup("tip"))
165 return update(repo, oldnode, repo.lookup("tip"))
166 else:
166 else:
167 raise util.Abort(_("branch %s not found") % curbranch)
167 raise util.Abort(_("branch %s not found") % curbranch)
168
168
169 def deletedivergent(repo, deletefrom, bm):
169 def deletedivergent(repo, deletefrom, bm):
170 '''Delete divergent versions of bm on nodes in deletefrom.
170 '''Delete divergent versions of bm on nodes in deletefrom.
171
171
172 Return True if at least one bookmark was deleted, False otherwise.'''
172 Return True if at least one bookmark was deleted, False otherwise.'''
173 deleted = False
173 deleted = False
174 marks = repo._bookmarks
174 marks = repo._bookmarks
175 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
175 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
176 for mark in divergent:
176 for mark in divergent:
177 if mark == '@' or '@' not in mark:
177 if mark == '@' or '@' not in mark:
178 # can't be divergent by definition
178 # can't be divergent by definition
179 continue
179 continue
180 if mark and marks[mark] in deletefrom:
180 if mark and marks[mark] in deletefrom:
181 if mark != bm:
181 if mark != bm:
182 del marks[mark]
182 del marks[mark]
183 deleted = True
183 deleted = True
184 return deleted
184 return deleted
185
185
186 def calculateupdate(ui, repo, checkout):
186 def calculateupdate(ui, repo, checkout):
187 '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
187 '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
188 check out and where to move the active bookmark from, if needed.'''
188 check out and where to move the active bookmark from, if needed.'''
189 movemarkfrom = None
189 movemarkfrom = None
190 if checkout is None:
190 if checkout is None:
191 curmark = repo._bookmarkcurrent
191 curmark = repo._bookmarkcurrent
192 if iscurrent(repo):
192 if iscurrent(repo):
193 movemarkfrom = repo['.'].node()
193 movemarkfrom = repo['.'].node()
194 elif curmark:
194 elif curmark:
195 ui.status(_("updating to active bookmark %s\n") % curmark)
195 ui.status(_("updating to active bookmark %s\n") % curmark)
196 checkout = curmark
196 checkout = curmark
197 return (checkout, movemarkfrom)
197 return (checkout, movemarkfrom)
198
198
199 def update(repo, parents, node):
199 def update(repo, parents, node):
200 deletefrom = parents
200 deletefrom = parents
201 marks = repo._bookmarks
201 marks = repo._bookmarks
202 update = False
202 update = False
203 cur = repo._bookmarkcurrent
203 cur = repo._bookmarkcurrent
204 if not cur:
204 if not cur:
205 return False
205 return False
206
206
207 if marks[cur] in parents:
207 if marks[cur] in parents:
208 new = repo[node]
208 new = repo[node]
209 divs = [repo[b] for b in marks
209 divs = [repo[b] for b in marks
210 if b.split('@', 1)[0] == cur.split('@', 1)[0]]
210 if b.split('@', 1)[0] == cur.split('@', 1)[0]]
211 anc = repo.changelog.ancestors([new.rev()])
211 anc = repo.changelog.ancestors([new.rev()])
212 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
212 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
213 if validdest(repo, repo[marks[cur]], new):
213 if validdest(repo, repo[marks[cur]], new):
214 marks[cur] = new.node()
214 marks[cur] = new.node()
215 update = True
215 update = True
216
216
217 if deletedivergent(repo, deletefrom, cur):
217 if deletedivergent(repo, deletefrom, cur):
218 update = True
218 update = True
219
219
220 if update:
220 if update:
221 marks.write()
221 marks.write()
222 return update
222 return update
223
223
224 def listbookmarks(repo):
224 def listbookmarks(repo):
225 # We may try to list bookmarks on a repo type that does not
225 # We may try to list bookmarks on a repo type that does not
226 # support it (e.g., statichttprepository).
226 # support it (e.g., statichttprepository).
227 marks = getattr(repo, '_bookmarks', {})
227 marks = getattr(repo, '_bookmarks', {})
228
228
229 d = {}
229 d = {}
230 hasnode = repo.changelog.hasnode
230 hasnode = repo.changelog.hasnode
231 for k, v in marks.iteritems():
231 for k, v in marks.iteritems():
232 # don't expose local divergent bookmarks
232 # don't expose local divergent bookmarks
233 if hasnode(v) and ('@' not in k or k.endswith('@')):
233 if hasnode(v) and ('@' not in k or k.endswith('@')):
234 d[k] = hex(v)
234 d[k] = hex(v)
235 return d
235 return d
236
236
237 def pushbookmark(repo, key, old, new):
237 def pushbookmark(repo, key, old, new):
238 w = repo.wlock()
238 w = repo.wlock()
239 try:
239 try:
240 marks = repo._bookmarks
240 marks = repo._bookmarks
241 existing = hex(marks.get(key, ''))
241 existing = hex(marks.get(key, ''))
242 if existing != old and existing != new:
242 if existing != old and existing != new:
243 return False
243 return False
244 if new == '':
244 if new == '':
245 del marks[key]
245 del marks[key]
246 else:
246 else:
247 if new not in repo:
247 if new not in repo:
248 return False
248 return False
249 marks[key] = repo[new].node()
249 marks[key] = repo[new].node()
250 marks.write()
250 marks.write()
251 return True
251 return True
252 finally:
252 finally:
253 w.release()
253 w.release()
254
254
255 def compare(repo, srcmarks, dstmarks,
255 def compare(repo, srcmarks, dstmarks,
256 srchex=None, dsthex=None, targets=None):
256 srchex=None, dsthex=None, targets=None):
257 '''Compare bookmarks between srcmarks and dstmarks
257 '''Compare bookmarks between srcmarks and dstmarks
258
258
259 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
259 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
260 differ, invalid)", each are list of bookmarks below:
260 differ, invalid)", each are list of bookmarks below:
261
261
262 :addsrc: added on src side (removed on dst side, perhaps)
262 :addsrc: added on src side (removed on dst side, perhaps)
263 :adddst: added on dst side (removed on src side, perhaps)
263 :adddst: added on dst side (removed on src side, perhaps)
264 :advsrc: advanced on src side
264 :advsrc: advanced on src side
265 :advdst: advanced on dst side
265 :advdst: advanced on dst side
266 :diverge: diverge
266 :diverge: diverge
267 :differ: changed, but changeset referred on src is unknown on dst
267 :differ: changed, but changeset referred on src is unknown on dst
268 :invalid: unknown on both side
268 :invalid: unknown on both side
269
269
270 Each elements of lists in result tuple is tuple "(bookmark name,
270 Each elements of lists in result tuple is tuple "(bookmark name,
271 changeset ID on source side, changeset ID on destination
271 changeset ID on source side, changeset ID on destination
272 side)". Each changeset IDs are 40 hexadecimal digit string or
272 side)". Each changeset IDs are 40 hexadecimal digit string or
273 None.
273 None.
274
274
275 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
275 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
276 "invalid" list may be unknown for repo.
276 "invalid" list may be unknown for repo.
277
277
278 This function expects that "srcmarks" and "dstmarks" return
278 This function expects that "srcmarks" and "dstmarks" return
279 changeset ID in 40 hexadecimal digit string for specified
279 changeset ID in 40 hexadecimal digit string for specified
280 bookmark. If not so (e.g. bmstore "repo._bookmarks" returning
280 bookmark. If not so (e.g. bmstore "repo._bookmarks" returning
281 binary value), "srchex" or "dsthex" should be specified to convert
281 binary value), "srchex" or "dsthex" should be specified to convert
282 into such form.
282 into such form.
283
283
284 If "targets" is specified, only bookmarks listed in it are
284 If "targets" is specified, only bookmarks listed in it are
285 examined.
285 examined.
286 '''
286 '''
287 if not srchex:
287 if not srchex:
288 srchex = lambda x: x
288 srchex = lambda x: x
289 if not dsthex:
289 if not dsthex:
290 dsthex = lambda x: x
290 dsthex = lambda x: x
291
291
292 if targets:
292 if targets:
293 bset = set(targets)
293 bset = set(targets)
294 else:
294 else:
295 srcmarkset = set(srcmarks)
295 srcmarkset = set(srcmarks)
296 dstmarkset = set(dstmarks)
296 dstmarkset = set(dstmarks)
297 bset = srcmarkset ^ dstmarkset
297 bset = srcmarkset ^ dstmarkset
298 for b in srcmarkset & dstmarkset:
298 for b in srcmarkset & dstmarkset:
299 if srchex(srcmarks[b]) != dsthex(dstmarks[b]):
299 if srchex(srcmarks[b]) != dsthex(dstmarks[b]):
300 bset.add(b)
300 bset.add(b)
301
301
302 results = ([], [], [], [], [], [], [])
302 results = ([], [], [], [], [], [], [])
303 addsrc = results[0].append
303 addsrc = results[0].append
304 adddst = results[1].append
304 adddst = results[1].append
305 advsrc = results[2].append
305 advsrc = results[2].append
306 advdst = results[3].append
306 advdst = results[3].append
307 diverge = results[4].append
307 diverge = results[4].append
308 differ = results[5].append
308 differ = results[5].append
309 invalid = results[6].append
309 invalid = results[6].append
310
310
311 for b in sorted(bset):
311 for b in sorted(bset):
312 if b not in srcmarks:
312 if b not in srcmarks:
313 if b in dstmarks:
313 if b in dstmarks:
314 adddst((b, None, dsthex(dstmarks[b])))
314 adddst((b, None, dsthex(dstmarks[b])))
315 else:
315 else:
316 invalid((b, None, None))
316 invalid((b, None, None))
317 elif b not in dstmarks:
317 elif b not in dstmarks:
318 addsrc((b, srchex(srcmarks[b]), None))
318 addsrc((b, srchex(srcmarks[b]), None))
319 else:
319 else:
320 scid = srchex(srcmarks[b])
320 scid = srchex(srcmarks[b])
321 dcid = dsthex(dstmarks[b])
321 dcid = dsthex(dstmarks[b])
322 if scid in repo and dcid in repo:
322 if scid in repo and dcid in repo:
323 sctx = repo[scid]
323 sctx = repo[scid]
324 dctx = repo[dcid]
324 dctx = repo[dcid]
325 if sctx.rev() < dctx.rev():
325 if sctx.rev() < dctx.rev():
326 if validdest(repo, sctx, dctx):
326 if validdest(repo, sctx, dctx):
327 advdst((b, scid, dcid))
327 advdst((b, scid, dcid))
328 else:
328 else:
329 diverge((b, scid, dcid))
329 diverge((b, scid, dcid))
330 else:
330 else:
331 if validdest(repo, dctx, sctx):
331 if validdest(repo, dctx, sctx):
332 advsrc((b, scid, dcid))
332 advsrc((b, scid, dcid))
333 else:
333 else:
334 diverge((b, scid, dcid))
334 diverge((b, scid, dcid))
335 else:
335 else:
336 # it is too expensive to examine in detail, in this case
336 # it is too expensive to examine in detail, in this case
337 differ((b, scid, dcid))
337 differ((b, scid, dcid))
338
338
339 return results
339 return results
340
340
341 def _diverge(ui, b, path, localmarks):
341 def _diverge(ui, b, path, localmarks):
342 if b == '@':
342 if b == '@':
343 b = ''
343 b = ''
344 # find a unique @ suffix
344 # find a unique @ suffix
345 for x in range(1, 100):
345 for x in range(1, 100):
346 n = '%s@%d' % (b, x)
346 n = '%s@%d' % (b, x)
347 if n not in localmarks:
347 if n not in localmarks:
348 break
348 break
349 # try to use an @pathalias suffix
349 # try to use an @pathalias suffix
350 # if an @pathalias already exists, we overwrite (update) it
350 # if an @pathalias already exists, we overwrite (update) it
351 if path.startswith("file:"):
351 if path.startswith("file:"):
352 path = util.url(path).path
352 path = util.url(path).path
353 for p, u in ui.configitems("paths"):
353 for p, u in ui.configitems("paths"):
354 if u.startswith("file:"):
354 if u.startswith("file:"):
355 u = util.url(u).path
355 u = util.url(u).path
356 if path == u:
356 if path == u:
357 n = '%s@%s' % (b, p)
357 n = '%s@%s' % (b, p)
358 return n
358 return n
359
359
360 def updatefromremote(ui, repo, remotemarks, path, explicit=()):
360 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
361 ui.debug("checking for updated bookmarks\n")
361 ui.debug("checking for updated bookmarks\n")
362 localmarks = repo._bookmarks
362 localmarks = repo._bookmarks
363 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
363 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
364 ) = compare(repo, remotemarks, localmarks, dsthex=hex)
364 ) = compare(repo, remotemarks, localmarks, dsthex=hex)
365
365
366 status = ui.status
366 status = ui.status
367 warn = ui.warn
367 warn = ui.warn
368 if ui.configbool('ui', 'quietbookmarkmove', False):
368 if ui.configbool('ui', 'quietbookmarkmove', False):
369 status = warn = ui.debug
369 status = warn = ui.debug
370
370
371 explicit = set(explicit)
371 explicit = set(explicit)
372 changed = []
372 changed = []
373 for b, scid, dcid in addsrc:
373 for b, scid, dcid in addsrc:
374 if scid in repo: # add remote bookmarks for changes we already have
374 if scid in repo: # add remote bookmarks for changes we already have
375 changed.append((b, bin(scid), status,
375 changed.append((b, bin(scid), status,
376 _("adding remote bookmark %s\n") % (b)))
376 _("adding remote bookmark %s\n") % (b)))
377 for b, scid, dcid in advsrc:
377 for b, scid, dcid in advsrc:
378 changed.append((b, bin(scid), status,
378 changed.append((b, bin(scid), status,
379 _("updating bookmark %s\n") % (b)))
379 _("updating bookmark %s\n") % (b)))
380 # remove normal movement from explicit set
380 # remove normal movement from explicit set
381 explicit.difference_update(d[0] for d in changed)
381 explicit.difference_update(d[0] for d in changed)
382
382
383 for b, scid, dcid in diverge:
383 for b, scid, dcid in diverge:
384 if b in explicit:
384 if b in explicit:
385 explicit.discard(b)
385 explicit.discard(b)
386 changed.append((b, bin(scid), status,
386 changed.append((b, bin(scid), status,
387 _("importing bookmark %s\n") % (b, b)))
387 _("importing bookmark %s\n") % (b, b)))
388 else:
388 else:
389 db = _diverge(ui, b, path, localmarks)
389 db = _diverge(ui, b, path, localmarks)
390 changed.append((db, bin(scid), warn,
390 changed.append((db, bin(scid), warn,
391 _("divergent bookmark %s stored as %s\n")
391 _("divergent bookmark %s stored as %s\n")
392 % (b, db)))
392 % (b, db)))
393 for b, scid, dcid in adddst + advdst:
393 for b, scid, dcid in adddst + advdst:
394 if b in explicit:
394 if b in explicit:
395 explicit.discard(b)
395 explicit.discard(b)
396 changed.append((b, bin(scid), status,
396 changed.append((b, bin(scid), status,
397 _("importing bookmark %s\n") % (b, b)))
397 _("importing bookmark %s\n") % (b, b)))
398
398
399 if changed:
399 if changed:
400 tr = trfunc()
400 for b, node, writer, msg in sorted(changed):
401 for b, node, writer, msg in sorted(changed):
401 localmarks[b] = node
402 localmarks[b] = node
402 writer(msg)
403 writer(msg)
403 localmarks.write()
404 localmarks.recordchange(tr)
404
405
405 def diff(ui, dst, src):
406 def diff(ui, dst, src):
406 ui.status(_("searching for changed bookmarks\n"))
407 ui.status(_("searching for changed bookmarks\n"))
407
408
408 smarks = src.listkeys('bookmarks')
409 smarks = src.listkeys('bookmarks')
409 dmarks = dst.listkeys('bookmarks')
410 dmarks = dst.listkeys('bookmarks')
410
411
411 diff = sorted(set(smarks) - set(dmarks))
412 diff = sorted(set(smarks) - set(dmarks))
412 for k in diff:
413 for k in diff:
413 mark = ui.debugflag and smarks[k] or smarks[k][:12]
414 mark = ui.debugflag and smarks[k] or smarks[k][:12]
414 ui.write(" %-25s %s\n" % (k, mark))
415 ui.write(" %-25s %s\n" % (k, mark))
415
416
416 if len(diff) <= 0:
417 if len(diff) <= 0:
417 ui.status(_("no changed bookmarks found\n"))
418 ui.status(_("no changed bookmarks found\n"))
418 return 1
419 return 1
419 return 0
420 return 0
420
421
421 def validdest(repo, old, new):
422 def validdest(repo, old, new):
422 """Is the new bookmark destination a valid update from the old one"""
423 """Is the new bookmark destination a valid update from the old one"""
423 repo = repo.unfiltered()
424 repo = repo.unfiltered()
424 if old == new:
425 if old == new:
425 # Old == new -> nothing to update.
426 # Old == new -> nothing to update.
426 return False
427 return False
427 elif not old:
428 elif not old:
428 # old is nullrev, anything is valid.
429 # old is nullrev, anything is valid.
429 # (new != nullrev has been excluded by the previous check)
430 # (new != nullrev has been excluded by the previous check)
430 return True
431 return True
431 elif repo.obsstore:
432 elif repo.obsstore:
432 return new.node() in obsolete.foreground(repo, [old.node()])
433 return new.node() in obsolete.foreground(repo, [old.node()])
433 else:
434 else:
434 # still an independent clause as it is lazyer (and therefore faster)
435 # still an independent clause as it is lazyer (and therefore faster)
435 return old.descendant(new)
436 return old.descendant(new)
@@ -1,1209 +1,1210 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13
13
14 def readbundle(ui, fh, fname, vfs=None):
14 def readbundle(ui, fh, fname, vfs=None):
15 header = changegroup.readexactly(fh, 4)
15 header = changegroup.readexactly(fh, 4)
16
16
17 alg = None
17 alg = None
18 if not fname:
18 if not fname:
19 fname = "stream"
19 fname = "stream"
20 if not header.startswith('HG') and header.startswith('\0'):
20 if not header.startswith('HG') and header.startswith('\0'):
21 fh = changegroup.headerlessfixup(fh, header)
21 fh = changegroup.headerlessfixup(fh, header)
22 header = "HG10"
22 header = "HG10"
23 alg = 'UN'
23 alg = 'UN'
24 elif vfs:
24 elif vfs:
25 fname = vfs.join(fname)
25 fname = vfs.join(fname)
26
26
27 magic, version = header[0:2], header[2:4]
27 magic, version = header[0:2], header[2:4]
28
28
29 if magic != 'HG':
29 if magic != 'HG':
30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 if version == '10':
31 if version == '10':
32 if alg is None:
32 if alg is None:
33 alg = changegroup.readexactly(fh, 2)
33 alg = changegroup.readexactly(fh, 2)
34 return changegroup.cg1unpacker(fh, alg)
34 return changegroup.cg1unpacker(fh, alg)
35 elif version == '2X':
35 elif version == '2X':
36 return bundle2.unbundle20(ui, fh, header=magic + version)
36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 else:
37 else:
38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39
39
40 def buildobsmarkerspart(bundler, markers):
40 def buildobsmarkerspart(bundler, markers):
41 """add an obsmarker part to the bundler with <markers>
41 """add an obsmarker part to the bundler with <markers>
42
42
43 No part is created if markers is empty.
43 No part is created if markers is empty.
44 Raises ValueError if the bundler doesn't support any known obsmarker format.
44 Raises ValueError if the bundler doesn't support any known obsmarker format.
45 """
45 """
46 if markers:
46 if markers:
47 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
47 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
48 version = obsolete.commonversion(remoteversions)
48 version = obsolete.commonversion(remoteversions)
49 if version is None:
49 if version is None:
50 raise ValueError('bundler do not support common obsmarker format')
50 raise ValueError('bundler do not support common obsmarker format')
51 stream = obsolete.encodemarkers(markers, True, version=version)
51 stream = obsolete.encodemarkers(markers, True, version=version)
52 return bundler.newpart('B2X:OBSMARKERS', data=stream)
52 return bundler.newpart('B2X:OBSMARKERS', data=stream)
53 return None
53 return None
54
54
55 class pushoperation(object):
55 class pushoperation(object):
56 """A object that represent a single push operation
56 """A object that represent a single push operation
57
57
58 It purpose is to carry push related state and very common operation.
58 It purpose is to carry push related state and very common operation.
59
59
60 A new should be created at the beginning of each push and discarded
60 A new should be created at the beginning of each push and discarded
61 afterward.
61 afterward.
62 """
62 """
63
63
64 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
64 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
65 bookmarks=()):
65 bookmarks=()):
66 # repo we push from
66 # repo we push from
67 self.repo = repo
67 self.repo = repo
68 self.ui = repo.ui
68 self.ui = repo.ui
69 # repo we push to
69 # repo we push to
70 self.remote = remote
70 self.remote = remote
71 # force option provided
71 # force option provided
72 self.force = force
72 self.force = force
73 # revs to be pushed (None is "all")
73 # revs to be pushed (None is "all")
74 self.revs = revs
74 self.revs = revs
75 # bookmark explicitly pushed
75 # bookmark explicitly pushed
76 self.bookmarks = bookmarks
76 self.bookmarks = bookmarks
77 # allow push of new branch
77 # allow push of new branch
78 self.newbranch = newbranch
78 self.newbranch = newbranch
79 # did a local lock get acquired?
79 # did a local lock get acquired?
80 self.locallocked = None
80 self.locallocked = None
81 # step already performed
81 # step already performed
82 # (used to check what steps have been already performed through bundle2)
82 # (used to check what steps have been already performed through bundle2)
83 self.stepsdone = set()
83 self.stepsdone = set()
84 # Integer version of the changegroup push result
84 # Integer version of the changegroup push result
85 # - None means nothing to push
85 # - None means nothing to push
86 # - 0 means HTTP error
86 # - 0 means HTTP error
87 # - 1 means we pushed and remote head count is unchanged *or*
87 # - 1 means we pushed and remote head count is unchanged *or*
88 # we have outgoing changesets but refused to push
88 # we have outgoing changesets but refused to push
89 # - other values as described by addchangegroup()
89 # - other values as described by addchangegroup()
90 self.cgresult = None
90 self.cgresult = None
91 # Boolean value for the bookmark push
91 # Boolean value for the bookmark push
92 self.bkresult = None
92 self.bkresult = None
93 # discover.outgoing object (contains common and outgoing data)
93 # discover.outgoing object (contains common and outgoing data)
94 self.outgoing = None
94 self.outgoing = None
95 # all remote heads before the push
95 # all remote heads before the push
96 self.remoteheads = None
96 self.remoteheads = None
97 # testable as a boolean indicating if any nodes are missing locally.
97 # testable as a boolean indicating if any nodes are missing locally.
98 self.incoming = None
98 self.incoming = None
99 # phases changes that must be pushed along side the changesets
99 # phases changes that must be pushed along side the changesets
100 self.outdatedphases = None
100 self.outdatedphases = None
101 # phases changes that must be pushed if changeset push fails
101 # phases changes that must be pushed if changeset push fails
102 self.fallbackoutdatedphases = None
102 self.fallbackoutdatedphases = None
103 # outgoing obsmarkers
103 # outgoing obsmarkers
104 self.outobsmarkers = set()
104 self.outobsmarkers = set()
105 # outgoing bookmarks
105 # outgoing bookmarks
106 self.outbookmarks = []
106 self.outbookmarks = []
107
107
108 @util.propertycache
108 @util.propertycache
109 def futureheads(self):
109 def futureheads(self):
110 """future remote heads if the changeset push succeeds"""
110 """future remote heads if the changeset push succeeds"""
111 return self.outgoing.missingheads
111 return self.outgoing.missingheads
112
112
113 @util.propertycache
113 @util.propertycache
114 def fallbackheads(self):
114 def fallbackheads(self):
115 """future remote heads if the changeset push fails"""
115 """future remote heads if the changeset push fails"""
116 if self.revs is None:
116 if self.revs is None:
117 # not target to push, all common are relevant
117 # not target to push, all common are relevant
118 return self.outgoing.commonheads
118 return self.outgoing.commonheads
119 unfi = self.repo.unfiltered()
119 unfi = self.repo.unfiltered()
120 # I want cheads = heads(::missingheads and ::commonheads)
120 # I want cheads = heads(::missingheads and ::commonheads)
121 # (missingheads is revs with secret changeset filtered out)
121 # (missingheads is revs with secret changeset filtered out)
122 #
122 #
123 # This can be expressed as:
123 # This can be expressed as:
124 # cheads = ( (missingheads and ::commonheads)
124 # cheads = ( (missingheads and ::commonheads)
125 # + (commonheads and ::missingheads))"
125 # + (commonheads and ::missingheads))"
126 # )
126 # )
127 #
127 #
128 # while trying to push we already computed the following:
128 # while trying to push we already computed the following:
129 # common = (::commonheads)
129 # common = (::commonheads)
130 # missing = ((commonheads::missingheads) - commonheads)
130 # missing = ((commonheads::missingheads) - commonheads)
131 #
131 #
132 # We can pick:
132 # We can pick:
133 # * missingheads part of common (::commonheads)
133 # * missingheads part of common (::commonheads)
134 common = set(self.outgoing.common)
134 common = set(self.outgoing.common)
135 nm = self.repo.changelog.nodemap
135 nm = self.repo.changelog.nodemap
136 cheads = [node for node in self.revs if nm[node] in common]
136 cheads = [node for node in self.revs if nm[node] in common]
137 # and
137 # and
138 # * commonheads parents on missing
138 # * commonheads parents on missing
139 revset = unfi.set('%ln and parents(roots(%ln))',
139 revset = unfi.set('%ln and parents(roots(%ln))',
140 self.outgoing.commonheads,
140 self.outgoing.commonheads,
141 self.outgoing.missing)
141 self.outgoing.missing)
142 cheads.extend(c.node() for c in revset)
142 cheads.extend(c.node() for c in revset)
143 return cheads
143 return cheads
144
144
145 @property
145 @property
146 def commonheads(self):
146 def commonheads(self):
147 """set of all common heads after changeset bundle push"""
147 """set of all common heads after changeset bundle push"""
148 if self.cgresult:
148 if self.cgresult:
149 return self.futureheads
149 return self.futureheads
150 else:
150 else:
151 return self.fallbackheads
151 return self.fallbackheads
152
152
153 # mapping of message used when pushing bookmark
153 # mapping of message used when pushing bookmark
154 bookmsgmap = {'update': (_("updating bookmark %s\n"),
154 bookmsgmap = {'update': (_("updating bookmark %s\n"),
155 _('updating bookmark %s failed!\n')),
155 _('updating bookmark %s failed!\n')),
156 'export': (_("exporting bookmark %s\n"),
156 'export': (_("exporting bookmark %s\n"),
157 _('exporting bookmark %s failed!\n')),
157 _('exporting bookmark %s failed!\n')),
158 'delete': (_("deleting remote bookmark %s\n"),
158 'delete': (_("deleting remote bookmark %s\n"),
159 _('deleting remote bookmark %s failed!\n')),
159 _('deleting remote bookmark %s failed!\n')),
160 }
160 }
161
161
162
162
163 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
163 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
164 '''Push outgoing changesets (limited by revs) from a local
164 '''Push outgoing changesets (limited by revs) from a local
165 repository to remote. Return an integer:
165 repository to remote. Return an integer:
166 - None means nothing to push
166 - None means nothing to push
167 - 0 means HTTP error
167 - 0 means HTTP error
168 - 1 means we pushed and remote head count is unchanged *or*
168 - 1 means we pushed and remote head count is unchanged *or*
169 we have outgoing changesets but refused to push
169 we have outgoing changesets but refused to push
170 - other values as described by addchangegroup()
170 - other values as described by addchangegroup()
171 '''
171 '''
172 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
172 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
173 if pushop.remote.local():
173 if pushop.remote.local():
174 missing = (set(pushop.repo.requirements)
174 missing = (set(pushop.repo.requirements)
175 - pushop.remote.local().supported)
175 - pushop.remote.local().supported)
176 if missing:
176 if missing:
177 msg = _("required features are not"
177 msg = _("required features are not"
178 " supported in the destination:"
178 " supported in the destination:"
179 " %s") % (', '.join(sorted(missing)))
179 " %s") % (', '.join(sorted(missing)))
180 raise util.Abort(msg)
180 raise util.Abort(msg)
181
181
182 # there are two ways to push to remote repo:
182 # there are two ways to push to remote repo:
183 #
183 #
184 # addchangegroup assumes local user can lock remote
184 # addchangegroup assumes local user can lock remote
185 # repo (local filesystem, old ssh servers).
185 # repo (local filesystem, old ssh servers).
186 #
186 #
187 # unbundle assumes local user cannot lock remote repo (new ssh
187 # unbundle assumes local user cannot lock remote repo (new ssh
188 # servers, http servers).
188 # servers, http servers).
189
189
190 if not pushop.remote.canpush():
190 if not pushop.remote.canpush():
191 raise util.Abort(_("destination does not support push"))
191 raise util.Abort(_("destination does not support push"))
192 # get local lock as we might write phase data
192 # get local lock as we might write phase data
193 locallock = None
193 locallock = None
194 try:
194 try:
195 locallock = pushop.repo.lock()
195 locallock = pushop.repo.lock()
196 pushop.locallocked = True
196 pushop.locallocked = True
197 except IOError, err:
197 except IOError, err:
198 pushop.locallocked = False
198 pushop.locallocked = False
199 if err.errno != errno.EACCES:
199 if err.errno != errno.EACCES:
200 raise
200 raise
201 # source repo cannot be locked.
201 # source repo cannot be locked.
202 # We do not abort the push, but just disable the local phase
202 # We do not abort the push, but just disable the local phase
203 # synchronisation.
203 # synchronisation.
204 msg = 'cannot lock source repository: %s\n' % err
204 msg = 'cannot lock source repository: %s\n' % err
205 pushop.ui.debug(msg)
205 pushop.ui.debug(msg)
206 try:
206 try:
207 pushop.repo.checkpush(pushop)
207 pushop.repo.checkpush(pushop)
208 lock = None
208 lock = None
209 unbundle = pushop.remote.capable('unbundle')
209 unbundle = pushop.remote.capable('unbundle')
210 if not unbundle:
210 if not unbundle:
211 lock = pushop.remote.lock()
211 lock = pushop.remote.lock()
212 try:
212 try:
213 _pushdiscovery(pushop)
213 _pushdiscovery(pushop)
214 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
214 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
215 False)
215 False)
216 and pushop.remote.capable('bundle2-exp')):
216 and pushop.remote.capable('bundle2-exp')):
217 _pushbundle2(pushop)
217 _pushbundle2(pushop)
218 _pushchangeset(pushop)
218 _pushchangeset(pushop)
219 _pushsyncphase(pushop)
219 _pushsyncphase(pushop)
220 _pushobsolete(pushop)
220 _pushobsolete(pushop)
221 _pushbookmark(pushop)
221 _pushbookmark(pushop)
222 finally:
222 finally:
223 if lock is not None:
223 if lock is not None:
224 lock.release()
224 lock.release()
225 finally:
225 finally:
226 if locallock is not None:
226 if locallock is not None:
227 locallock.release()
227 locallock.release()
228
228
229 return pushop
229 return pushop
230
230
231 # list of steps to perform discovery before push
231 # list of steps to perform discovery before push
232 pushdiscoveryorder = []
232 pushdiscoveryorder = []
233
233
234 # Mapping between step name and function
234 # Mapping between step name and function
235 #
235 #
236 # This exists to help extensions wrap steps if necessary
236 # This exists to help extensions wrap steps if necessary
237 pushdiscoverymapping = {}
237 pushdiscoverymapping = {}
238
238
239 def pushdiscovery(stepname):
239 def pushdiscovery(stepname):
240 """decorator for function performing discovery before push
240 """decorator for function performing discovery before push
241
241
242 The function is added to the step -> function mapping and appended to the
242 The function is added to the step -> function mapping and appended to the
243 list of steps. Beware that decorated function will be added in order (this
243 list of steps. Beware that decorated function will be added in order (this
244 may matter).
244 may matter).
245
245
246 You can only use this decorator for a new step, if you want to wrap a step
246 You can only use this decorator for a new step, if you want to wrap a step
247 from an extension, change the pushdiscovery dictionary directly."""
247 from an extension, change the pushdiscovery dictionary directly."""
248 def dec(func):
248 def dec(func):
249 assert stepname not in pushdiscoverymapping
249 assert stepname not in pushdiscoverymapping
250 pushdiscoverymapping[stepname] = func
250 pushdiscoverymapping[stepname] = func
251 pushdiscoveryorder.append(stepname)
251 pushdiscoveryorder.append(stepname)
252 return func
252 return func
253 return dec
253 return dec
254
254
255 def _pushdiscovery(pushop):
255 def _pushdiscovery(pushop):
256 """Run all discovery steps"""
256 """Run all discovery steps"""
257 for stepname in pushdiscoveryorder:
257 for stepname in pushdiscoveryorder:
258 step = pushdiscoverymapping[stepname]
258 step = pushdiscoverymapping[stepname]
259 step(pushop)
259 step(pushop)
260
260
261 @pushdiscovery('changeset')
261 @pushdiscovery('changeset')
262 def _pushdiscoverychangeset(pushop):
262 def _pushdiscoverychangeset(pushop):
263 """discover the changeset that need to be pushed"""
263 """discover the changeset that need to be pushed"""
264 unfi = pushop.repo.unfiltered()
264 unfi = pushop.repo.unfiltered()
265 fci = discovery.findcommonincoming
265 fci = discovery.findcommonincoming
266 commoninc = fci(unfi, pushop.remote, force=pushop.force)
266 commoninc = fci(unfi, pushop.remote, force=pushop.force)
267 common, inc, remoteheads = commoninc
267 common, inc, remoteheads = commoninc
268 fco = discovery.findcommonoutgoing
268 fco = discovery.findcommonoutgoing
269 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
269 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
270 commoninc=commoninc, force=pushop.force)
270 commoninc=commoninc, force=pushop.force)
271 pushop.outgoing = outgoing
271 pushop.outgoing = outgoing
272 pushop.remoteheads = remoteheads
272 pushop.remoteheads = remoteheads
273 pushop.incoming = inc
273 pushop.incoming = inc
274
274
275 @pushdiscovery('phase')
275 @pushdiscovery('phase')
276 def _pushdiscoveryphase(pushop):
276 def _pushdiscoveryphase(pushop):
277 """discover the phase that needs to be pushed
277 """discover the phase that needs to be pushed
278
278
279 (computed for both success and failure case for changesets push)"""
279 (computed for both success and failure case for changesets push)"""
280 outgoing = pushop.outgoing
280 outgoing = pushop.outgoing
281 unfi = pushop.repo.unfiltered()
281 unfi = pushop.repo.unfiltered()
282 remotephases = pushop.remote.listkeys('phases')
282 remotephases = pushop.remote.listkeys('phases')
283 publishing = remotephases.get('publishing', False)
283 publishing = remotephases.get('publishing', False)
284 ana = phases.analyzeremotephases(pushop.repo,
284 ana = phases.analyzeremotephases(pushop.repo,
285 pushop.fallbackheads,
285 pushop.fallbackheads,
286 remotephases)
286 remotephases)
287 pheads, droots = ana
287 pheads, droots = ana
288 extracond = ''
288 extracond = ''
289 if not publishing:
289 if not publishing:
290 extracond = ' and public()'
290 extracond = ' and public()'
291 revset = 'heads((%%ln::%%ln) %s)' % extracond
291 revset = 'heads((%%ln::%%ln) %s)' % extracond
292 # Get the list of all revs draft on remote by public here.
292 # Get the list of all revs draft on remote by public here.
293 # XXX Beware that revset break if droots is not strictly
293 # XXX Beware that revset break if droots is not strictly
294 # XXX root we may want to ensure it is but it is costly
294 # XXX root we may want to ensure it is but it is costly
295 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
295 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
296 if not outgoing.missing:
296 if not outgoing.missing:
297 future = fallback
297 future = fallback
298 else:
298 else:
299 # adds changeset we are going to push as draft
299 # adds changeset we are going to push as draft
300 #
300 #
301 # should not be necessary for pushblishing server, but because of an
301 # should not be necessary for pushblishing server, but because of an
302 # issue fixed in xxxxx we have to do it anyway.
302 # issue fixed in xxxxx we have to do it anyway.
303 fdroots = list(unfi.set('roots(%ln + %ln::)',
303 fdroots = list(unfi.set('roots(%ln + %ln::)',
304 outgoing.missing, droots))
304 outgoing.missing, droots))
305 fdroots = [f.node() for f in fdroots]
305 fdroots = [f.node() for f in fdroots]
306 future = list(unfi.set(revset, fdroots, pushop.futureheads))
306 future = list(unfi.set(revset, fdroots, pushop.futureheads))
307 pushop.outdatedphases = future
307 pushop.outdatedphases = future
308 pushop.fallbackoutdatedphases = fallback
308 pushop.fallbackoutdatedphases = fallback
309
309
310 @pushdiscovery('obsmarker')
310 @pushdiscovery('obsmarker')
311 def _pushdiscoveryobsmarkers(pushop):
311 def _pushdiscoveryobsmarkers(pushop):
312 if (obsolete._enabled
312 if (obsolete._enabled
313 and pushop.repo.obsstore
313 and pushop.repo.obsstore
314 and 'obsolete' in pushop.remote.listkeys('namespaces')):
314 and 'obsolete' in pushop.remote.listkeys('namespaces')):
315 repo = pushop.repo
315 repo = pushop.repo
316 # very naive computation, that can be quite expensive on big repo.
316 # very naive computation, that can be quite expensive on big repo.
317 # However: evolution is currently slow on them anyway.
317 # However: evolution is currently slow on them anyway.
318 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
318 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
319 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
319 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
320
320
321 @pushdiscovery('bookmarks')
321 @pushdiscovery('bookmarks')
322 def _pushdiscoverybookmarks(pushop):
322 def _pushdiscoverybookmarks(pushop):
323 ui = pushop.ui
323 ui = pushop.ui
324 repo = pushop.repo.unfiltered()
324 repo = pushop.repo.unfiltered()
325 remote = pushop.remote
325 remote = pushop.remote
326 ui.debug("checking for updated bookmarks\n")
326 ui.debug("checking for updated bookmarks\n")
327 ancestors = ()
327 ancestors = ()
328 if pushop.revs:
328 if pushop.revs:
329 revnums = map(repo.changelog.rev, pushop.revs)
329 revnums = map(repo.changelog.rev, pushop.revs)
330 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
330 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
331 remotebookmark = remote.listkeys('bookmarks')
331 remotebookmark = remote.listkeys('bookmarks')
332
332
333 explicit = set(pushop.bookmarks)
333 explicit = set(pushop.bookmarks)
334
334
335 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
335 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
336 addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
336 addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
337 for b, scid, dcid in advsrc:
337 for b, scid, dcid in advsrc:
338 if b in explicit:
338 if b in explicit:
339 explicit.remove(b)
339 explicit.remove(b)
340 if not ancestors or repo[scid].rev() in ancestors:
340 if not ancestors or repo[scid].rev() in ancestors:
341 pushop.outbookmarks.append((b, dcid, scid))
341 pushop.outbookmarks.append((b, dcid, scid))
342 # search added bookmark
342 # search added bookmark
343 for b, scid, dcid in addsrc:
343 for b, scid, dcid in addsrc:
344 if b in explicit:
344 if b in explicit:
345 explicit.remove(b)
345 explicit.remove(b)
346 pushop.outbookmarks.append((b, '', scid))
346 pushop.outbookmarks.append((b, '', scid))
347 # search for overwritten bookmark
347 # search for overwritten bookmark
348 for b, scid, dcid in advdst + diverge + differ:
348 for b, scid, dcid in advdst + diverge + differ:
349 if b in explicit:
349 if b in explicit:
350 explicit.remove(b)
350 explicit.remove(b)
351 pushop.outbookmarks.append((b, dcid, scid))
351 pushop.outbookmarks.append((b, dcid, scid))
352 # search for bookmark to delete
352 # search for bookmark to delete
353 for b, scid, dcid in adddst:
353 for b, scid, dcid in adddst:
354 if b in explicit:
354 if b in explicit:
355 explicit.remove(b)
355 explicit.remove(b)
356 # treat as "deleted locally"
356 # treat as "deleted locally"
357 pushop.outbookmarks.append((b, dcid, ''))
357 pushop.outbookmarks.append((b, dcid, ''))
358
358
359 if explicit:
359 if explicit:
360 explicit = sorted(explicit)
360 explicit = sorted(explicit)
361 # we should probably list all of them
361 # we should probably list all of them
362 ui.warn(_('bookmark %s does not exist on the local '
362 ui.warn(_('bookmark %s does not exist on the local '
363 'or remote repository!\n') % explicit[0])
363 'or remote repository!\n') % explicit[0])
364 pushop.bkresult = 2
364 pushop.bkresult = 2
365
365
366 pushop.outbookmarks.sort()
366 pushop.outbookmarks.sort()
367
367
368 def _pushcheckoutgoing(pushop):
368 def _pushcheckoutgoing(pushop):
369 outgoing = pushop.outgoing
369 outgoing = pushop.outgoing
370 unfi = pushop.repo.unfiltered()
370 unfi = pushop.repo.unfiltered()
371 if not outgoing.missing:
371 if not outgoing.missing:
372 # nothing to push
372 # nothing to push
373 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
373 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
374 return False
374 return False
375 # something to push
375 # something to push
376 if not pushop.force:
376 if not pushop.force:
377 # if repo.obsstore == False --> no obsolete
377 # if repo.obsstore == False --> no obsolete
378 # then, save the iteration
378 # then, save the iteration
379 if unfi.obsstore:
379 if unfi.obsstore:
380 # this message are here for 80 char limit reason
380 # this message are here for 80 char limit reason
381 mso = _("push includes obsolete changeset: %s!")
381 mso = _("push includes obsolete changeset: %s!")
382 mst = {"unstable": _("push includes unstable changeset: %s!"),
382 mst = {"unstable": _("push includes unstable changeset: %s!"),
383 "bumped": _("push includes bumped changeset: %s!"),
383 "bumped": _("push includes bumped changeset: %s!"),
384 "divergent": _("push includes divergent changeset: %s!")}
384 "divergent": _("push includes divergent changeset: %s!")}
385 # If we are to push if there is at least one
385 # If we are to push if there is at least one
386 # obsolete or unstable changeset in missing, at
386 # obsolete or unstable changeset in missing, at
387 # least one of the missinghead will be obsolete or
387 # least one of the missinghead will be obsolete or
388 # unstable. So checking heads only is ok
388 # unstable. So checking heads only is ok
389 for node in outgoing.missingheads:
389 for node in outgoing.missingheads:
390 ctx = unfi[node]
390 ctx = unfi[node]
391 if ctx.obsolete():
391 if ctx.obsolete():
392 raise util.Abort(mso % ctx)
392 raise util.Abort(mso % ctx)
393 elif ctx.troubled():
393 elif ctx.troubled():
394 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
394 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
395 newbm = pushop.ui.configlist('bookmarks', 'pushing')
395 newbm = pushop.ui.configlist('bookmarks', 'pushing')
396 discovery.checkheads(unfi, pushop.remote, outgoing,
396 discovery.checkheads(unfi, pushop.remote, outgoing,
397 pushop.remoteheads,
397 pushop.remoteheads,
398 pushop.newbranch,
398 pushop.newbranch,
399 bool(pushop.incoming),
399 bool(pushop.incoming),
400 newbm)
400 newbm)
401 return True
401 return True
402
402
403 # List of names of steps to perform for an outgoing bundle2, order matters.
403 # List of names of steps to perform for an outgoing bundle2, order matters.
404 b2partsgenorder = []
404 b2partsgenorder = []
405
405
406 # Mapping between step name and function
406 # Mapping between step name and function
407 #
407 #
408 # This exists to help extensions wrap steps if necessary
408 # This exists to help extensions wrap steps if necessary
409 b2partsgenmapping = {}
409 b2partsgenmapping = {}
410
410
411 def b2partsgenerator(stepname):
411 def b2partsgenerator(stepname):
412 """decorator for function generating bundle2 part
412 """decorator for function generating bundle2 part
413
413
414 The function is added to the step -> function mapping and appended to the
414 The function is added to the step -> function mapping and appended to the
415 list of steps. Beware that decorated functions will be added in order
415 list of steps. Beware that decorated functions will be added in order
416 (this may matter).
416 (this may matter).
417
417
418 You can only use this decorator for new steps, if you want to wrap a step
418 You can only use this decorator for new steps, if you want to wrap a step
419 from an extension, attack the b2partsgenmapping dictionary directly."""
419 from an extension, attack the b2partsgenmapping dictionary directly."""
420 def dec(func):
420 def dec(func):
421 assert stepname not in b2partsgenmapping
421 assert stepname not in b2partsgenmapping
422 b2partsgenmapping[stepname] = func
422 b2partsgenmapping[stepname] = func
423 b2partsgenorder.append(stepname)
423 b2partsgenorder.append(stepname)
424 return func
424 return func
425 return dec
425 return dec
426
426
427 @b2partsgenerator('changeset')
427 @b2partsgenerator('changeset')
428 def _pushb2ctx(pushop, bundler):
428 def _pushb2ctx(pushop, bundler):
429 """handle changegroup push through bundle2
429 """handle changegroup push through bundle2
430
430
431 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
431 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
432 """
432 """
433 if 'changesets' in pushop.stepsdone:
433 if 'changesets' in pushop.stepsdone:
434 return
434 return
435 pushop.stepsdone.add('changesets')
435 pushop.stepsdone.add('changesets')
436 # Send known heads to the server for race detection.
436 # Send known heads to the server for race detection.
437 if not _pushcheckoutgoing(pushop):
437 if not _pushcheckoutgoing(pushop):
438 return
438 return
439 pushop.repo.prepushoutgoinghooks(pushop.repo,
439 pushop.repo.prepushoutgoinghooks(pushop.repo,
440 pushop.remote,
440 pushop.remote,
441 pushop.outgoing)
441 pushop.outgoing)
442 if not pushop.force:
442 if not pushop.force:
443 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
443 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
444 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
444 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
445 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
445 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
446 def handlereply(op):
446 def handlereply(op):
447 """extract addchangroup returns from server reply"""
447 """extract addchangroup returns from server reply"""
448 cgreplies = op.records.getreplies(cgpart.id)
448 cgreplies = op.records.getreplies(cgpart.id)
449 assert len(cgreplies['changegroup']) == 1
449 assert len(cgreplies['changegroup']) == 1
450 pushop.cgresult = cgreplies['changegroup'][0]['return']
450 pushop.cgresult = cgreplies['changegroup'][0]['return']
451 return handlereply
451 return handlereply
452
452
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one ``b2x:pushkey`` part per outdated remote head that must be
    turned public, and returns a reply handler that reports any head the
    server refused (or ignored) to move.
    """
    if 'phases' in pushop.stepsdone:
        # phases were already handled by an earlier step
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        # remote cannot process pushkey parts; fall back handled elsewhere
        return
    pushop.stepsdone.add('phases')
    # remember (part id, head) pairs so the reply handler can match
    # server answers back to the head they concern
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # inspect the server reply for each pushkey part and warn on failure
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
484
484
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle2 push when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # only emit markers if both sides agree on at least one format version
    common = obsolete.commonversion(
        bundle2.obsmarkersversion(bundler.capabilities))
    if common is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
495
495
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    (Docstring fixed: it previously said "phase push", copy-pasted from
    ``_pushb2phases``.)

    Adds one ``b2x:pushkey`` part per outgoing bookmark change and returns
    a reply handler reporting per-bookmark success or failure.
    """
    if 'bookmarks' in pushop.stepsdone:
        # bookmarks were already handled by an earlier step
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot process pushkey parts; fall back handled elsewhere
        return
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark name, action) triples so the reply handler can
    # print the right message for each server answer
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the change for user-facing messages (see bookmsgmap)
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                # consistently use the local `ui` alias (the original mixed
                # `pushop.ui.warn` and `ui.warn`; both are the same object)
                ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        # flag that at least one bookmark update failed
                        pushop.bkresult = 1
    return handlereply
538
538
539
539
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Runs every registered part generator (in ``b2partsgenorder`` order),
    sends the assembled bundle in a single round-trip, then dispatches the
    server reply to each generator's reply handler.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        # generators return a callable reply handler when they added parts
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part alone accounts for one part, hence <= 1)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        # remote rejected a mandatory part/parameter it does not know
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        # the reply itself contains something we cannot process
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
569
569
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Non-bundle2 (legacy) path: builds a changegroup locally and applies it
    remotely via ``unbundle`` (preferred, race-checked) or the older
    ``addchangegroup``.  Stores the remote's return value in
    ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        # changesets were already pushed (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
618
618
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase data locally (possibly turning local
    drafts public), then pushes local phase advances to the remote, either
    batched through bundle2 pushkey parts or via individual pushkey calls.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            # nothing was pushed; use the more conservative fallback set
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            # report per-head results from the server reply
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
710
710
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    repo = pushop.repo
    if not pushop.locallocked:
        # Without the local lock we must not touch any phases; just tell
        # the user when a move would have happened.
        blocked = [n for n in nodes if phase < repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if blocked:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    # locked: perform the boundary move inside its own transaction
    tr = repo.transaction('push-phase-sync')
    try:
        phases.advanceboundary(repo, tr, phase, nodes)
        tr.close()
    finally:
        tr.release()
729
729
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    payload = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    outcomes = [remote.pushkey('obsolete', key, '', payload[key])
                for key in sorted(payload, reverse=True)]
    if not all(outcomes):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
748
748
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # classify the change for user-facing messages (see bookmsgmap)
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        ok = remote.pushkey('bookmarks', name, oldnode, newnode)
        if ok:
            ui.status(bookmsgmap[action][0] % name)
        else:
            ui.warn(bookmsgmap[action][1] % name)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
770
770
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly (by name)
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created (lazily, see gettransaction)
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # set of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers',
                              'bookmarks'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
842
842
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and phases/bookmarks/obsmarkers) from a remote repo

    Creates a ``pulloperation``, runs discovery, then performs the pull
    either through bundle2 (when both sides support it) or through the
    legacy per-step path.  Returns the changegroup-application result
    (``pullop.cgresult``).
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local-to-local pull: make sure we understand the source's format
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # the steps below are no-ops for anything bundle2 already handled
        # (they check/consume pullop.todosteps or stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
870
870
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point."""
    unfi = pullop.repo.unfiltered()
    common, fetch, rheads = discovery.findcommonincoming(
        unfi, pullop.remote, heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
881
881
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Assembles the ``getbundle`` arguments (changegroup, listkeys,
    obsmarkers), fetches the bundle in a single round-trip, and processes
    it inside the pull transaction (note ``pullop.gettransaction`` is
    passed to ``processbundle`` so phase and bookmark updates happen in
    the transaction too)."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        # fetch phase and bookmark data in the same round-trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete._enabled:
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.todosteps.remove('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        # bundle contains a mandatory part/parameter we do not understand
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
930
930
931 def _pullbundle2extraprepare(pullop, kwargs):
931 def _pullbundle2extraprepare(pullop, kwargs):
932 """hook function so that extensions can extend the getbundle call"""
932 """hook function so that extensions can extend the getbundle call"""
933 pass
933 pass
934
934
935 def _pullchangeset(pullop):
935 def _pullchangeset(pullop):
936 """pull changeset from unbundle into the local repo"""
936 """pull changeset from unbundle into the local repo"""
937 # We delay the open of the transaction as late as possible so we
937 # We delay the open of the transaction as late as possible so we
938 # don't open transaction for nothing or you break future useful
938 # don't open transaction for nothing or you break future useful
939 # rollback call
939 # rollback call
940 if 'changegroup' not in pullop.todosteps:
940 if 'changegroup' not in pullop.todosteps:
941 return
941 return
942 pullop.todosteps.remove('changegroup')
942 pullop.todosteps.remove('changegroup')
943 if not pullop.fetch:
943 if not pullop.fetch:
944 pullop.repo.ui.status(_("no changes found\n"))
944 pullop.repo.ui.status(_("no changes found\n"))
945 pullop.cgresult = 0
945 pullop.cgresult = 0
946 return
946 return
947 pullop.gettransaction()
947 pullop.gettransaction()
948 if pullop.heads is None and list(pullop.common) == [nullid]:
948 if pullop.heads is None and list(pullop.common) == [nullid]:
949 pullop.repo.ui.status(_("requesting all changes\n"))
949 pullop.repo.ui.status(_("requesting all changes\n"))
950 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
950 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
951 # issue1320, avoid a race if remote changed after discovery
951 # issue1320, avoid a race if remote changed after discovery
952 pullop.heads = pullop.rheads
952 pullop.heads = pullop.rheads
953
953
954 if pullop.remote.capable('getbundle'):
954 if pullop.remote.capable('getbundle'):
955 # TODO: get bundlecaps from remote
955 # TODO: get bundlecaps from remote
956 cg = pullop.remote.getbundle('pull', common=pullop.common,
956 cg = pullop.remote.getbundle('pull', common=pullop.common,
957 heads=pullop.heads or pullop.rheads)
957 heads=pullop.heads or pullop.rheads)
958 elif pullop.heads is None:
958 elif pullop.heads is None:
959 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
959 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
960 elif not pullop.remote.capable('changegroupsubset'):
960 elif not pullop.remote.capable('changegroupsubset'):
961 raise util.Abort(_("partial pull cannot be done because "
961 raise util.Abort(_("partial pull cannot be done because "
962 "other repository doesn't support "
962 "other repository doesn't support "
963 "changegroupsubset."))
963 "changegroupsubset."))
964 else:
964 else:
965 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
965 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
966 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
966 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
967 pullop.remote.url())
967 pullop.remote.url())
968
968
969 def _pullphase(pullop):
969 def _pullphase(pullop):
970 # Get remote phases data from remote
970 # Get remote phases data from remote
971 if 'phases' not in pullop.todosteps:
971 if 'phases' not in pullop.todosteps:
972 return
972 return
973 remotephases = pullop.remote.listkeys('phases')
973 remotephases = pullop.remote.listkeys('phases')
974 _pullapplyphases(pullop, remotephases)
974 _pullapplyphases(pullop, remotephases)
975
975
976 def _pullapplyphases(pullop, remotephases):
976 def _pullapplyphases(pullop, remotephases):
977 """apply phase movement from observed remote state"""
977 """apply phase movement from observed remote state"""
978 pullop.todosteps.remove('phases')
978 pullop.todosteps.remove('phases')
979 publishing = bool(remotephases.get('publishing', False))
979 publishing = bool(remotephases.get('publishing', False))
980 if remotephases and not publishing:
980 if remotephases and not publishing:
981 # remote is new and unpublishing
981 # remote is new and unpublishing
982 pheads, _dr = phases.analyzeremotephases(pullop.repo,
982 pheads, _dr = phases.analyzeremotephases(pullop.repo,
983 pullop.pulledsubset,
983 pullop.pulledsubset,
984 remotephases)
984 remotephases)
985 dheads = pullop.pulledsubset
985 dheads = pullop.pulledsubset
986 else:
986 else:
987 # Remote is old or publishing all common changesets
987 # Remote is old or publishing all common changesets
988 # should be seen as public
988 # should be seen as public
989 pheads = pullop.pulledsubset
989 pheads = pullop.pulledsubset
990 dheads = []
990 dheads = []
991 unfi = pullop.repo.unfiltered()
991 unfi = pullop.repo.unfiltered()
992 phase = unfi._phasecache.phase
992 phase = unfi._phasecache.phase
993 rev = unfi.changelog.nodemap.get
993 rev = unfi.changelog.nodemap.get
994 public = phases.public
994 public = phases.public
995 draft = phases.draft
995 draft = phases.draft
996
996
997 # exclude changesets already public locally and update the others
997 # exclude changesets already public locally and update the others
998 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
998 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
999 if pheads:
999 if pheads:
1000 tr = pullop.gettransaction()
1000 tr = pullop.gettransaction()
1001 phases.advanceboundary(pullop.repo, tr, public, pheads)
1001 phases.advanceboundary(pullop.repo, tr, public, pheads)
1002
1002
1003 # exclude changesets already draft locally and update the others
1003 # exclude changesets already draft locally and update the others
1004 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1004 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1005 if dheads:
1005 if dheads:
1006 tr = pullop.gettransaction()
1006 tr = pullop.gettransaction()
1007 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1007 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1008
1008
1009 def _pullbookmarks(pullop):
1009 def _pullbookmarks(pullop):
1010 """process the remote bookmark information to update the local one"""
1010 """process the remote bookmark information to update the local one"""
1011 if 'bookmarks' not in pullop.todosteps:
1011 if 'bookmarks' not in pullop.todosteps:
1012 return
1012 return
1013 pullop.todosteps.remove('bookmarks')
1013 pullop.todosteps.remove('bookmarks')
1014 repo = pullop.repo
1014 repo = pullop.repo
1015 remotebookmarks = pullop.remotebookmarks
1015 remotebookmarks = pullop.remotebookmarks
1016 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1016 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1017 pullop.remote.url(),
1017 pullop.remote.url(),
1018 pullop.gettransaction,
1018 explicit=pullop.explicitbookmarks)
1019 explicit=pullop.explicitbookmarks)
1019
1020
1020 def _pullobsolete(pullop):
1021 def _pullobsolete(pullop):
1021 """utility function to pull obsolete markers from a remote
1022 """utility function to pull obsolete markers from a remote
1022
1023
1023 The `gettransaction` is function that return the pull transaction, creating
1024 The `gettransaction` is function that return the pull transaction, creating
1024 one if necessary. We return the transaction to inform the calling code that
1025 one if necessary. We return the transaction to inform the calling code that
1025 a new transaction have been created (when applicable).
1026 a new transaction have been created (when applicable).
1026
1027
1027 Exists mostly to allow overriding for experimentation purpose"""
1028 Exists mostly to allow overriding for experimentation purpose"""
1028 if 'obsmarkers' not in pullop.todosteps:
1029 if 'obsmarkers' not in pullop.todosteps:
1029 return
1030 return
1030 pullop.todosteps.remove('obsmarkers')
1031 pullop.todosteps.remove('obsmarkers')
1031 tr = None
1032 tr = None
1032 if obsolete._enabled:
1033 if obsolete._enabled:
1033 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1034 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1034 remoteobs = pullop.remote.listkeys('obsolete')
1035 remoteobs = pullop.remote.listkeys('obsolete')
1035 if 'dump0' in remoteobs:
1036 if 'dump0' in remoteobs:
1036 tr = pullop.gettransaction()
1037 tr = pullop.gettransaction()
1037 for key in sorted(remoteobs, reverse=True):
1038 for key in sorted(remoteobs, reverse=True):
1038 if key.startswith('dump'):
1039 if key.startswith('dump'):
1039 data = base85.b85decode(remoteobs[key])
1040 data = base85.b85decode(remoteobs[key])
1040 pullop.repo.obsstore.mergemarkers(tr, data)
1041 pullop.repo.obsstore.mergemarkers(tr, data)
1041 pullop.repo.invalidatevolatilesets()
1042 pullop.repo.invalidatevolatilesets()
1042 return tr
1043 return tr
1043
1044
1044 def caps20to10(repo):
1045 def caps20to10(repo):
1045 """return a set with appropriate options to use bundle20 during getbundle"""
1046 """return a set with appropriate options to use bundle20 during getbundle"""
1046 caps = set(['HG2X'])
1047 caps = set(['HG2X'])
1047 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1048 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1048 caps.add('bundle2=' + urllib.quote(capsblob))
1049 caps.add('bundle2=' + urllib.quote(capsblob))
1049 return caps
1050 return caps
1050
1051
1051 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1052 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1052 getbundle2partsorder = []
1053 getbundle2partsorder = []
1053
1054
1054 # Mapping between step name and function
1055 # Mapping between step name and function
1055 #
1056 #
1056 # This exists to help extensions wrap steps if necessary
1057 # This exists to help extensions wrap steps if necessary
1057 getbundle2partsmapping = {}
1058 getbundle2partsmapping = {}
1058
1059
1059 def getbundle2partsgenerator(stepname):
1060 def getbundle2partsgenerator(stepname):
1060 """decorator for function generating bundle2 part for getbundle
1061 """decorator for function generating bundle2 part for getbundle
1061
1062
1062 The function is added to the step -> function mapping and appended to the
1063 The function is added to the step -> function mapping and appended to the
1063 list of steps. Beware that decorated functions will be added in order
1064 list of steps. Beware that decorated functions will be added in order
1064 (this may matter).
1065 (this may matter).
1065
1066
1066 You can only use this decorator for new steps, if you want to wrap a step
1067 You can only use this decorator for new steps, if you want to wrap a step
1067 from an extension, attack the getbundle2partsmapping dictionary directly."""
1068 from an extension, attack the getbundle2partsmapping dictionary directly."""
1068 def dec(func):
1069 def dec(func):
1069 assert stepname not in getbundle2partsmapping
1070 assert stepname not in getbundle2partsmapping
1070 getbundle2partsmapping[stepname] = func
1071 getbundle2partsmapping[stepname] = func
1071 getbundle2partsorder.append(stepname)
1072 getbundle2partsorder.append(stepname)
1072 return func
1073 return func
1073 return dec
1074 return dec
1074
1075
1075 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1076 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1076 **kwargs):
1077 **kwargs):
1077 """return a full bundle (with potentially multiple kind of parts)
1078 """return a full bundle (with potentially multiple kind of parts)
1078
1079
1079 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
1080 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
1080 passed. For now, the bundle can contain only changegroup, but this will
1081 passed. For now, the bundle can contain only changegroup, but this will
1081 changes when more part type will be available for bundle2.
1082 changes when more part type will be available for bundle2.
1082
1083
1083 This is different from changegroup.getchangegroup that only returns an HG10
1084 This is different from changegroup.getchangegroup that only returns an HG10
1084 changegroup bundle. They may eventually get reunited in the future when we
1085 changegroup bundle. They may eventually get reunited in the future when we
1085 have a clearer idea of the API we what to query different data.
1086 have a clearer idea of the API we what to query different data.
1086
1087
1087 The implementation is at a very early stage and will get massive rework
1088 The implementation is at a very early stage and will get massive rework
1088 when the API of bundle is refined.
1089 when the API of bundle is refined.
1089 """
1090 """
1090 # bundle10 case
1091 # bundle10 case
1091 if bundlecaps is None or 'HG2X' not in bundlecaps:
1092 if bundlecaps is None or 'HG2X' not in bundlecaps:
1092 if bundlecaps and not kwargs.get('cg', True):
1093 if bundlecaps and not kwargs.get('cg', True):
1093 raise ValueError(_('request for bundle10 must include changegroup'))
1094 raise ValueError(_('request for bundle10 must include changegroup'))
1094
1095
1095 if kwargs:
1096 if kwargs:
1096 raise ValueError(_('unsupported getbundle arguments: %s')
1097 raise ValueError(_('unsupported getbundle arguments: %s')
1097 % ', '.join(sorted(kwargs.keys())))
1098 % ', '.join(sorted(kwargs.keys())))
1098 return changegroup.getchangegroup(repo, source, heads=heads,
1099 return changegroup.getchangegroup(repo, source, heads=heads,
1099 common=common, bundlecaps=bundlecaps)
1100 common=common, bundlecaps=bundlecaps)
1100
1101
1101 # bundle20 case
1102 # bundle20 case
1102 b2caps = {}
1103 b2caps = {}
1103 for bcaps in bundlecaps:
1104 for bcaps in bundlecaps:
1104 if bcaps.startswith('bundle2='):
1105 if bcaps.startswith('bundle2='):
1105 blob = urllib.unquote(bcaps[len('bundle2='):])
1106 blob = urllib.unquote(bcaps[len('bundle2='):])
1106 b2caps.update(bundle2.decodecaps(blob))
1107 b2caps.update(bundle2.decodecaps(blob))
1107 bundler = bundle2.bundle20(repo.ui, b2caps)
1108 bundler = bundle2.bundle20(repo.ui, b2caps)
1108
1109
1109 for name in getbundle2partsorder:
1110 for name in getbundle2partsorder:
1110 func = getbundle2partsmapping[name]
1111 func = getbundle2partsmapping[name]
1111 kwargs['heads'] = heads
1112 kwargs['heads'] = heads
1112 kwargs['common'] = common
1113 kwargs['common'] = common
1113 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1114 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1114 **kwargs)
1115 **kwargs)
1115
1116
1116 return util.chunkbuffer(bundler.getchunks())
1117 return util.chunkbuffer(bundler.getchunks())
1117
1118
1118 @getbundle2partsgenerator('changegroup')
1119 @getbundle2partsgenerator('changegroup')
1119 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1120 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1120 b2caps=None, heads=None, common=None, **kwargs):
1121 b2caps=None, heads=None, common=None, **kwargs):
1121 """add a changegroup part to the requested bundle"""
1122 """add a changegroup part to the requested bundle"""
1122 cg = None
1123 cg = None
1123 if kwargs.get('cg', True):
1124 if kwargs.get('cg', True):
1124 # build changegroup bundle here.
1125 # build changegroup bundle here.
1125 cg = changegroup.getchangegroup(repo, source, heads=heads,
1126 cg = changegroup.getchangegroup(repo, source, heads=heads,
1126 common=common, bundlecaps=bundlecaps)
1127 common=common, bundlecaps=bundlecaps)
1127
1128
1128 if cg:
1129 if cg:
1129 bundler.newpart('b2x:changegroup', data=cg.getchunks())
1130 bundler.newpart('b2x:changegroup', data=cg.getchunks())
1130
1131
1131 @getbundle2partsgenerator('listkeys')
1132 @getbundle2partsgenerator('listkeys')
1132 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1133 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1133 b2caps=None, **kwargs):
1134 b2caps=None, **kwargs):
1134 """add parts containing listkeys namespaces to the requested bundle"""
1135 """add parts containing listkeys namespaces to the requested bundle"""
1135 listkeys = kwargs.get('listkeys', ())
1136 listkeys = kwargs.get('listkeys', ())
1136 for namespace in listkeys:
1137 for namespace in listkeys:
1137 part = bundler.newpart('b2x:listkeys')
1138 part = bundler.newpart('b2x:listkeys')
1138 part.addparam('namespace', namespace)
1139 part.addparam('namespace', namespace)
1139 keys = repo.listkeys(namespace).items()
1140 keys = repo.listkeys(namespace).items()
1140 part.data = pushkey.encodekeys(keys)
1141 part.data = pushkey.encodekeys(keys)
1141
1142
1142 @getbundle2partsgenerator('obsmarkers')
1143 @getbundle2partsgenerator('obsmarkers')
1143 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1144 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1144 b2caps=None, heads=None, **kwargs):
1145 b2caps=None, heads=None, **kwargs):
1145 """add an obsolescence markers part to the requested bundle"""
1146 """add an obsolescence markers part to the requested bundle"""
1146 if kwargs.get('obsmarkers', False):
1147 if kwargs.get('obsmarkers', False):
1147 if heads is None:
1148 if heads is None:
1148 heads = repo.heads()
1149 heads = repo.heads()
1149 subset = [c.node() for c in repo.set('::%ln', heads)]
1150 subset = [c.node() for c in repo.set('::%ln', heads)]
1150 markers = repo.obsstore.relevantmarkers(subset)
1151 markers = repo.obsstore.relevantmarkers(subset)
1151 buildobsmarkerspart(bundler, markers)
1152 buildobsmarkerspart(bundler, markers)
1152
1153
1153 @getbundle2partsgenerator('extra')
1154 @getbundle2partsgenerator('extra')
1154 def _getbundleextrapart(bundler, repo, source, bundlecaps=None,
1155 def _getbundleextrapart(bundler, repo, source, bundlecaps=None,
1155 b2caps=None, **kwargs):
1156 b2caps=None, **kwargs):
1156 """hook function to let extensions add parts to the requested bundle"""
1157 """hook function to let extensions add parts to the requested bundle"""
1157 pass
1158 pass
1158
1159
1159 def check_heads(repo, their_heads, context):
1160 def check_heads(repo, their_heads, context):
1160 """check if the heads of a repo have been modified
1161 """check if the heads of a repo have been modified
1161
1162
1162 Used by peer for unbundling.
1163 Used by peer for unbundling.
1163 """
1164 """
1164 heads = repo.heads()
1165 heads = repo.heads()
1165 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1166 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1166 if not (their_heads == ['force'] or their_heads == heads or
1167 if not (their_heads == ['force'] or their_heads == heads or
1167 their_heads == ['hashed', heads_hash]):
1168 their_heads == ['hashed', heads_hash]):
1168 # someone else committed/pushed/unbundled while we
1169 # someone else committed/pushed/unbundled while we
1169 # were transferring data
1170 # were transferring data
1170 raise error.PushRaced('repository changed while %s - '
1171 raise error.PushRaced('repository changed while %s - '
1171 'please try again' % context)
1172 'please try again' % context)
1172
1173
1173 def unbundle(repo, cg, heads, source, url):
1174 def unbundle(repo, cg, heads, source, url):
1174 """Apply a bundle to a repo.
1175 """Apply a bundle to a repo.
1175
1176
1176 this function makes sure the repo is locked during the application and have
1177 this function makes sure the repo is locked during the application and have
1177 mechanism to check that no push race occurred between the creation of the
1178 mechanism to check that no push race occurred between the creation of the
1178 bundle and its application.
1179 bundle and its application.
1179
1180
1180 If the push was raced as PushRaced exception is raised."""
1181 If the push was raced as PushRaced exception is raised."""
1181 r = 0
1182 r = 0
1182 # need a transaction when processing a bundle2 stream
1183 # need a transaction when processing a bundle2 stream
1183 tr = None
1184 tr = None
1184 lock = repo.lock()
1185 lock = repo.lock()
1185 try:
1186 try:
1186 check_heads(repo, heads, 'uploading changes')
1187 check_heads(repo, heads, 'uploading changes')
1187 # push can proceed
1188 # push can proceed
1188 if util.safehasattr(cg, 'params'):
1189 if util.safehasattr(cg, 'params'):
1189 try:
1190 try:
1190 tr = repo.transaction('unbundle')
1191 tr = repo.transaction('unbundle')
1191 tr.hookargs['bundle2-exp'] = '1'
1192 tr.hookargs['bundle2-exp'] = '1'
1192 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1193 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1193 cl = repo.unfiltered().changelog
1194 cl = repo.unfiltered().changelog
1194 p = cl.writepending() and repo.root or ""
1195 p = cl.writepending() and repo.root or ""
1195 repo.hook('b2x-pretransactionclose', throw=True, source=source,
1196 repo.hook('b2x-pretransactionclose', throw=True, source=source,
1196 url=url, pending=p, **tr.hookargs)
1197 url=url, pending=p, **tr.hookargs)
1197 tr.close()
1198 tr.close()
1198 repo.hook('b2x-transactionclose', source=source, url=url,
1199 repo.hook('b2x-transactionclose', source=source, url=url,
1199 **tr.hookargs)
1200 **tr.hookargs)
1200 except Exception, exc:
1201 except Exception, exc:
1201 exc.duringunbundle2 = True
1202 exc.duringunbundle2 = True
1202 raise
1203 raise
1203 else:
1204 else:
1204 r = changegroup.addchangegroup(repo, cg, source, url)
1205 r = changegroup.addchangegroup(repo, cg, source, url)
1205 finally:
1206 finally:
1206 if tr is not None:
1207 if tr is not None:
1207 tr.release()
1208 tr.release()
1208 lock.release()
1209 lock.release()
1209 return r
1210 return r
General Comments 0
You need to be logged in to leave comments. Login now