changegroup: replace getlocalchangegroupraw with makestream...
Durham Goode
r34100:d8245139 default
@@ -1,1016 +1,1005 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    pycompat,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

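# Illustrative sketch (not part of the original module): the delta header
# formats above are fixed-width struct layouts. For cg1, each delta record
# starts with four 20-byte binary nodes; unpacking a captured header might
# look like this (the helper name and dict keys are just local labels, not
# Mercurial API):
def _example_parsecg1deltaheader(headerdata):
    # headerdata must be exactly 80 bytes: node, p1, p2 and the linknode
    node, p1, p2, cs = struct.unpack(_CHANGEGROUPV1_DELTA_HEADER, headerdata)
    return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs}
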
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

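# Illustrative sketch (not part of the original module): on the wire a chunk
# is a 4-byte big-endian length (which counts itself) followed by the
# payload, and a zero-length header terminates a group. Round-tripping one
# chunk through an in-memory stream:
def _example_framingroundtrip():
    import io
    payload = b'hello'
    framed = io.BytesIO(chunkheader(len(payload)) + payload + closechunk())
    assert getchunk(framed) == payload   # payload comes back intact
    assert getchunk(framed) == ""        # empty chunk marks the end
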
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

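# Illustrative sketch (not part of the original module): writing a framed
# stream to a temporary bundle file. Passing filename=None makes writechunks
# pick a tempfile name; note that the ui argument is not used by the function
# body itself, so any value the caller has at hand works.
def _example_writetempbundle(ui):
    chunks = [chunkheader(5), b'hello', closechunk()]
    return writechunks(ui, iter(chunks), None)   # returns the temp file path
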
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

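    # Illustrative sketch (not part of the original class): a delta group is
    # consumed by calling deltachunk() until the empty terminator, threading
    # the previous node through so cg1 can infer each implicit delta base:
    #
    #     prevnode = None
    #     while True:
    #         data = unpacker.deltachunk(prevnode)
    #         if not data:
    #             break
    #         prevnode = data['node']
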
    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block on an sshrepo, which does not signal the end of the
        stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

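    # Illustrative sketch (not part of the original class): forwarding a
    # received changegroup verbatim, e.g. to a file object opened by the
    # caller:
    #
    #     for chunk in unpacker.getchunks():
    #         outfh.write(chunk)
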
    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level values (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

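# Illustrative sketch (not part of the original module): feeding a raw,
# uncompressed cg1 stream to the unpacker and applying it inside a
# transaction. `repo`, `fh` and `url` are assumed to be supplied by the
# caller; real callers normally go through the exchange/bundle2 helpers
# instead of doing this by hand.
def _example_applybundle(repo, fh, url):
    cg = cg1unpacker(fh, 'UN')          # 'UN' means no compression
    tr = repo.transaction('example-unbundle')
    try:
        ret = cg.apply(repo, tr, 'unbundle', url)
        tr.close()
        return ret
    finally:
        tr.release()
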
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

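# Illustrative sketch (not part of the original module): headerlessfixup lets
# a caller that has already consumed the leading bytes of a stream (say, to
# sniff the bundle type) splice them back in front of subsequent reads:
def _example_sniff(fh):
    magic = readexactly(fh, 4)        # the four bytes are now gone from fh
    return magic, headerlessfixup(fh, magic)   # ...but reads here replay them
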
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent, as it has all history before
        these changesets. If the first parent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

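    # Illustrative sketch (not part of the original class): for a nodelist
    # [N1, N2], group() emits
    #
    #     chunkheader(...), header(N1) + delta(parent(N1) -> N1),
    #     chunkheader(...), header(N2) + delta(N1 -> N2),
    #     closechunk()
    #
    # so the receiver can rebuild N1 from a parent it already has and N2
    # from the just-reconstructed N1.
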
    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

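    # Illustrative summary (not part of the original class): generate() emits
    # the parts in wire order: the changelog group, the manifest group(s),
    # then for each changed file a fileheader() followed by its delta group,
    # and a final closechunk() terminating the file list.
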
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    return versions

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
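
# Illustrative sketch (not part of Mercurial itself): version strings compare
# lexicographically, so min() over the supported set picks the oldest safe
# version. The exact values below are assumed for illustration:
#
# >>> versions = {'01', '02', '03'}
# >>> versions.discard('01')  # e.g. when 'generaldelta' is in requirements
# >>> min(versions)
# '02'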

def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing)})
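
# Illustrative usage sketch (assumed, not part of this patch): makestream
# yields raw changegroup chunks, while makechangegroup wraps the same stream
# in an unpacker so it can be applied locally. Mirroring the changegroup()
# helper below:
#
#   outgoing = discovery.outgoing(repo, missingroots=basenodes,
#                                 missingheads=repo.heads())
#   for chunk in makestream(repo, outgoing, '02', 'push'):
#       ...  # write the raw bytes, e.g. into a bundle2 part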

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

# removed by this change (superseded by makestream):
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    return makechangegroup(repo, outgoing, version, source,
                           bundlecaps=bundlecaps)

def getlocalchangegroup(repo, *args, **kwargs):
    repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
                       '4.3')
    return getchangegroup(repo, *args, **kwargs)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    outgoing = discovery.outgoing(repo, missingroots=basenodes,
                                  missingheads=repo.heads())
    return makechangegroup(repo, outgoing, '01', source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,2013 +1,2011 b''
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
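
# Illustrative sketch (assumed behavior, inferred from the docstring and the
# _bundlespeccgversions table above): with externalnames=False, human-centric
# names are mapped to internal identifiers, so one would expect, e.g.:
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta')
#       -> ('UN', 's1', {'requirements': 'generaldelta'})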

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
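
# Sketch of the header dispatch above (derived from the code; the layout
# summary is the editor's reading, not authoritative): the first two bytes
# are the magic and the next two the version; cg1 bundles then carry a
# 2-byte compression code.
#
#   'HG10' + 'BZ'  -> changegroup.cg1unpacker over a bzip2 stream
#   'HG20'         -> bundle2.getunbundler (parameters follow in-stream)
#   'HGS1' + 'UN'  -> streamclone.streamcloneapplier
#   '\0...'        -> headerless cg1 data, fixed up and treated as 'HG10UN'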

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked, and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
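
# Illustrative usage (a sketch; the file name and resulting spec are assumed,
# e.g. for a bundle produced by `hg bundle`):
#
#   with open('changes.hg', 'rb') as fh:
#       spec = getbundlespec(ui, fh)   # e.g. 'bzip2-v2'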

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # version available should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')
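
# Illustrative sketch (assumed, e.g. from a test harness): forcing bundle1
# through the developer config described above:
#
#   repo.ui.setconfig('devel', 'legacy.exchange', 'bundle1', 'test')
#
# or equivalently in an hgrc:
#
#   [devel]
#   legacy.exchange = bundle1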

class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map {pushkey partid -> callback handling failure}
        # used to handle exceptions from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
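
# Illustrative usage (a sketch; the peer construction and URL are assumptions,
# not part of this module):
#
#   from mercurial import hg, exchange
#   remote = hg.peer(repo, {}, 'https://example.com/repo')
#   pushop = exchange.push(repo, remote, newbranch=True)
#   if pushop.cgresult:
#       ...  # changesets were pushed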

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
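
# Illustrative sketch (hypothetical, e.g. from a third-party extension):
# registering an extra discovery step. Since steps run in registration order,
# it would execute after the built-in ones:
#
#   @pushdiscovery('mystep')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('running custom discovery\n')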

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changeset push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo')
            and remotephases    # server supports phases
            and not pushop.outgoing.missing # no changesets to be pushed
            and publishing):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy, to publish changesets that are possibly draft locally
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly roots;
    # XXX we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # This should not be necessary for a publishing server, but because
        # of an issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
            and pushop.repo.obsstore
            and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # Very naive computation that can be quite expensive on a big repo.
        # However, evolution is currently slow on such repos anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty --> there are no obsolete changesets,
        # so we can skip the iteration entirely
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable changeset in
            # missing, then at least one of the missing heads will be
            # obsolete or unstable. So checking heads only is OK.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
680
680
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

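# Example: a minimal sketch of how a hypothetical extension could use
# b2partsgenerator() to contribute its own part to outgoing bundles. Shown
# as a comment because actually defining it here would register it at
# import time; the part name and payload are illustrative only.
#
#     @b2partsgenerator('exp-myfeature')
#     def _pushb2myfeature(pushop, bundler):
#         if 'myfeature' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('myfeature')
#         bundler.newpart('exp-myfeature', data='payload')
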
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * a 'force' push does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

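# Worked sketch of the 'check:updated-heads' computation above with toy
# values (nodes abbreviated as strings for readability):
#
#     remoteheads = {'a', 'b'}   # heads the remote had at discovery time
#     newheads = {'b', 'c'}      # heads the push will leave in place
#     discardedheads = {'a'}     # heads the push makes obsolete/hidden
#     affected = (discardedheads & remoteheads) | (remoteheads - newheads)
#     # affected == {'a'}: the part asks the server to abort if 'a' is no
#     # longer a head there, i.e. someone else moved it since discovery.
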
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

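# A minimal sketch of building a changegroup part the same way outside the
# push machinery (assumes 'repo' and a precomputed 'outgoing' object are in
# scope; the bundler construction mirrors _pushbundle2 below):
#
#     bundler = bundle2.bundle20(repo.ui)
#     cgstream = changegroup.makestream(repo, outgoing, '02', 'push')
#     part = bundler.newpart('changegroup', data=cgstream)
#     part.addparam('version', '02')
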
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

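# For reference, the pushkey part built above for a single outdated head
# carries roughly this payload (values illustrative):
#
#     namespace: 'phases'
#     key:       '<40-hex-digit changeset node>'
#     old:       '1'    # str(phases.draft)
#     new:       '0'    # str(phases.public)
#
# i.e. it asks the server to move that changeset from draft to public.
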
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

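# The KEY=VALUE parsing above, in isolation (a standalone sketch with
# illustrative values):
#
#     k, v = 'DEBUG=1'.split('=', 1)   # -> ('DEBUG', '1')
#     k, v = 'EMPTY='.split('=', 1)    # -> ('EMPTY', '')
#
# The variables are sent as advisory (mandatory=False) part parameters, so
# servers that do not understand pushvars simply ignore them.
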
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
                                        bundlecaps=bundlecaps)

    # apply changegroup to remote
    # The local repo finds the heads on the server, then figures out which
    # revs it must push. Once the revs are transferred, if the server finds
    # it has different heads (someone else won a commit/push race), the
    # server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation that would otherwise be done
        # as a courtesy, to avoid publishing changesets that may still be
        # draft locally but are already public on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fall back to the independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

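# Quick reference for the old/new pair that encodes the bookmark action in
# both the bundle2 and pushkey paths above:
#
#     old == ''        -> 'export'  (bookmark is new on the remote)
#     new == ''        -> 'delete'  (bookmark removed from the remote)
#     both non-empty   -> 'update'  (bookmark moved to a new node)
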
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

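# A minimal usage sketch for transactionmanager (this mirrors what pull()
# below does; 'repo' and 'url' are assumed to be in scope):
#
#     trmanager = transactionmanager(repo, 'pull', url)
#     try:
#         tr = trmanager.transaction()   # created lazily, then reused
#         # ... apply incoming data within the transaction ...
#         trmanager.close()              # commit, if one was ever opened
#     finally:
#         trmanager.release()            # abort if still open
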
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop

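# A minimal caller sketch (assuming 'repo' is a local repository and using
# mercurial.hg to obtain a peer; the URL is illustrative):
#
#     from mercurial import hg
#     other = hg.peer(repo, {}, 'https://example.com/repo')
#     pullop = pull(repo, other)
#     if pullop.cgresult == 0:
#         repo.ui.status('no changesets were pulled\n')
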
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, modify the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

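# Example: how a hypothetical extension could register an extra discovery
# step; shown as a comment because defining it here would register it at
# import time, and the listkeys namespace is illustrative only:
#
#     @pulldiscovery('exp-mydata')
#     def _pulldiscoverymydata(pullop):
#         pullop.mydata = pullop.remote.listkeys('exp-mydata')
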
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is known but filtered locally, drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of
        # round trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # If the bundle had a phase-heads part, then phase exchange is already done
    if op.records['phase-heads']:
        pullop.stepsdone.add('phases')

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

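# Example: an extension extending the getbundle call by wrapping this hook
# with extensions.wrapfunction (a sketch; the extra argument name is
# illustrative):
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs['exp-mypart'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             _extraprepare)
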
def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as long as possible, so we don't
    # open a transaction for nothing and don't break a future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` argument is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

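# A minimal illustrative sketch (not part of this changeset): an extension
# could use the decorator above to register an extra getbundle part. The step
# name 'exampledata', the capability test and the payload are all
# hypothetical; it is left commented out so nothing is registered at import.
#
# @getbundle2partsgenerator('exampledata')
# def _getbundleexampledatapart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#     """add a hypothetical extra part to the requested bundle"""
#     if 'exampledata' in (b2caps or {}):
#         part = bundler.newpart('exampledata', data='payload')
#         part.addparam('origin', source, mandatory=False)
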
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

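# A minimal illustrative sketch (not part of this changeset): materializing
# the chunk iterator returned by getbundlechunks() into an on-disk bundle.
# The helper name and the destination path argument are hypothetical.
def _writebundlesketch(repo, path):
    # advertise bundle2 so the HG20 branch above is taken
    chunks = getbundlechunks(repo, 'bundle', bundlecaps=caps20to10(repo))
    with open(path, 'wb') as fh:
        for chunk in chunks:
            fh.write(chunk)
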
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cgstream = changegroup.makestream(repo, outgoing, version, source,
                                          bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

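# A minimal illustrative sketch (not part of this changeset) of the version
# negotiation above: intersect what the client advertised with what the repo
# can produce, then take the maximum. The example values are hypothetical.
_client_cgversions = ['01', '02']        # e.g. from b2caps['changegroup']
_server_cgversions = {'01', '02', '03'}  # e.g. supportedoutgoingversions()
_common_cgversions = [v for v in _client_cgversions
                      if v in _server_cgversions]
assert max(_common_cgversions) == '02'   # highest mutually supported version
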
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20-byte changeset nodes and .hgtags
    filenode raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns the bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

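# A minimal illustrative sketch (not part of this changeset): a client that
# wants the 'hashed' comparison above to succeed sends a digest computed the
# same way over the heads it observed. The node values here are hypothetical.
_observedheads = ['\x11' * 20, '\x22' * 20]  # fake 20-byte binary head nodes
_their_heads = ['hashed',
                hashlib.sha1(''.join(sorted(_observedheads))).digest()]
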
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

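# A minimal illustrative sketch (not part of this changeset): the
# [wlock, lock, tr] list in unbundle() is a Python 2 idiom that lets a nested
# function assign to enclosing state without 'nonlocal'. Standalone form,
# with a hypothetical acquire() callable:
def _lazyresourcesketch(acquire):
    holder = [None]                # mutable cell shared with the closure
    def get():
        if holder[0] is None:
            holder[0] = acquire()  # acquired only on first use
        return holder[0]
    return get
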
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding to
    the entry's URL; the remaining keys are that entry's attributes.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

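# A minimal illustrative sketch (not part of this changeset): given this
# hypothetical two-line manifest text,
#
#   https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#   https://cdn.example.com/partial.hg BUNDLESPEC=bzip2-v1
#
# parseclonebundlesmanifest() returns one dict per line, with BUNDLESPEC
# additionally split into COMPRESSION and VERSION:
#
#   [{'URL': 'https://cdn.example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#     'REQUIRESNI': 'true', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#    {'URL': 'https://cdn.example.com/partial.hg', 'BUNDLESPEC': 'bzip2-v1',
#     'COMPRESSION': 'bzip2', 'VERSION': 'v1'}]
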
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

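# A minimal illustrative sketch (not part of this changeset): with a
# hypothetical config
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
# configlist() yields ['VERSION=v2', 'COMPRESSION=gzip'], so entries whose
# VERSION is exactly 'v2' sort first, and ties are broken by COMPRESSION.
_prefers_sketch = [p.split('=', 1) for p in ['VERSION=v2', 'COMPRESSION=gzip']]
assert _prefers_sketch == [['VERSION', 'v2'], ['COMPRESSION', 'gzip']]
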
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False