changegroup: replace getsubset with makechangegroup...
Durham Goode
r34098:f85dfde1 default
@@ -1,1024 +1,1030 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    pycompat,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

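# Editor's note: a minimal, self-contained sketch (not part of the original
# module) of the framing the helpers above implement. Each chunk is a
# big-endian 4-byte length that counts the four header bytes themselves,
# followed by the payload; a zero-length header terminates a group.
def _examplechunkroundtrip():
    import io
    payload = b'hello changegroup'
    framed = chunkheader(len(payload)) + payload + closechunk()
    stream = io.BytesIO(framed)
    assert getchunk(stream) == payload  # the payload comes back intact
    assert not getchunk(stream)         # the empty chunk marks the end
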
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

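# Editor's note: a hypothetical usage sketch for writechunks(), not part of
# the original module. With filename=None the framed chunks are spooled to a
# temporary "hg-bundle-*.hg" file whose path is returned, and the caller owns
# that file afterwards; the ui argument is part of the signature but unused
# by the implementation above.
def _examplespoolbundle(ui):
    chunks = [chunkheader(3) + b'abc', closechunk()]
    return writechunks(ui, chunks, None)
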
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

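    # Editor's note: an illustrative helper, not part of the original class,
    # showing how deltachunk() is consumed: each call returns a dict for the
    # next delta (threading prevnode through, since cg1 delta bases are
    # implicit) and an empty dict once the current group is exhausted.
    def _exampleiterdeltas(self):
        prevnode = None
        chunkdata = self.deltachunk(prevnode)
        while chunkdata:
            yield chunkdata
            prevnode = chunkdata['node']
            chunkdata = self.deltachunk(prevnode)
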
    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

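    # Editor's note: an illustrative helper, not part of the original class,
    # for the forwarding use case the getchunks() docstring describes:
    # replaying the framed stream unmodified into a writable binary file
    # object (outfh) such as an open bundle file.
    def _exampleforward(self, outfh):
        for chunk in self.getchunks():
            outfh.write(chunk)
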
    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level values (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
                if added:
                    phases.registernew(repo, tr, targetphase, added)
                if phaseall is not None:
                    phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

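# Editor's note: a sketch, not part of the original module, of decoding
# apply()'s return value per its docstring: 0 means nothing changed, n > 1
# means n - 1 heads were added, 1 means the head count is unchanged, and
# n < 0 means -n - 1 heads were removed.
def _exampledescribeapply(ret):
    if ret == 0:
        return 'no changes'
    if ret > 1:
        return '%d new heads' % (ret - 1)
    if ret == 1:
        return 'head count unchanged'
    return '%d heads removed' % (-ret - 1)
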
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

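# Editor's note: an illustrative comparison, not part of the original module,
# of the three delta header layouts defined at the top of the file. cg2
# appends an explicit 20-byte delta base to the four nodes of cg1, and cg3
# further appends a 16-bit flags field (hence the explicit ">" marker).
def _exampleheadersizes():
    return {
        '01': struct.calcsize(_CHANGEGROUPV1_DELTA_HEADER),  # 80 bytes
        '02': struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER),  # 100 bytes
        '03': struct.calcsize(_CHANGEGROUPV3_DELTA_HEADER),  # 102 bytes
    }
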
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

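# Editor's note: a sketch, not part of the original module, of what
# headerlessfixup is for. When a caller has already read magic bytes off a
# stream to identify it, the wrapper glues them back on so downstream code
# sees the complete stream.
def _examplereattachheader(fh):
    magic = readexactly(fh, 4)         # peek at the first four bytes
    # ... decide what kind of stream this is from magic ...
    return headerlessfixup(fh, magic)  # put the bytes back for the reader
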
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. If the first parent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

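    # Editor's note: an illustrative helper, not part of the original class,
    # sketching the delta ordering group() emits: for linearized revs
    # [r1, r2, r3] the deltas are parent(r1)->r1, r1->r2 and r2->r3, so the
    # receiver can rebuild every revision from the one before it.
    def _exampledeltaorder(self, revs, firstparent):
        pairs = []
        prev = firstparent
        for curr in revs:
            pairs.append((prev, curr))
            prev = curr
        return pairs
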
    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup table for filenodes; we collected the linkrev nodes above
            # in the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
}

865 def allsupportedversions(repo):
865 def allsupportedversions(repo):
866 versions = set(_packermap.keys())
866 versions = set(_packermap.keys())
867 if not (repo.ui.configbool('experimental', 'changegroup3') or
867 if not (repo.ui.configbool('experimental', 'changegroup3') or
868 repo.ui.configbool('experimental', 'treemanifest') or
868 repo.ui.configbool('experimental', 'treemanifest') or
869 'treemanifest' in repo.requirements):
869 'treemanifest' in repo.requirements):
870 versions.discard('03')
870 versions.discard('03')
871 return versions
871 return versions
872
872
873 # Changegroup versions that can be applied to the repo
873 # Changegroup versions that can be applied to the repo
874 def supportedincomingversions(repo):
874 def supportedincomingversions(repo):
875 return allsupportedversions(repo)
875 return allsupportedversions(repo)
876
876
877 # Changegroup versions that can be created from the repo
877 # Changegroup versions that can be created from the repo
878 def supportedoutgoingversions(repo):
878 def supportedoutgoingversions(repo):
879 versions = allsupportedversions(repo)
879 versions = allsupportedversions(repo)
880 if 'treemanifest' in repo.requirements:
880 if 'treemanifest' in repo.requirements:
881 # Versions 01 and 02 support only flat manifests and it's just too
881 # Versions 01 and 02 support only flat manifests and it's just too
882 # expensive to convert between the flat manifest and tree manifest on
882 # expensive to convert between the flat manifest and tree manifest on
883 # the fly. Since tree manifests are hashed differently, all of history
883 # the fly. Since tree manifests are hashed differently, all of history
884 # would have to be converted. Instead, we simply don't even pretend to
884 # would have to be converted. Instead, we simply don't even pretend to
885 # support versions 01 and 02.
885 # support versions 01 and 02.
886 versions.discard('01')
886 versions.discard('01')
887 versions.discard('02')
887 versions.discard('02')
888 return versions
888 return versions
889
889
890 def safeversion(repo):
890 def safeversion(repo):
891 # Finds the smallest version that it's safe to assume clients of the repo
891 # Finds the smallest version that it's safe to assume clients of the repo
892 # will support. For example, all hg versions that support generaldelta also
892 # will support. For example, all hg versions that support generaldelta also
893 # support changegroup 02.
893 # support changegroup 02.
894 versions = supportedoutgoingversions(repo)
894 versions = supportedoutgoingversions(repo)
895 if 'generaldelta' in repo.requirements:
895 if 'generaldelta' in repo.requirements:
896 versions.discard('01')
896 versions.discard('01')
897 assert versions
897 assert versions
898 return min(versions)
898 return min(versions)
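# Editorial example (derived from the logic above, not in the original
# source): on a repo with the 'generaldelta' requirement and no
# changegroup3/treemanifest configuration, safeversion(repo) returns '02',
# since '01' is discarded and '03' was never offered.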

def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)
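# Editorial sketch (not in the original source): _packermap keys double as
# the on-the-wire version strings, so the two helpers above are symmetric.
#
#   bundler = getbundler('02', repo)          # -> a cg2packer instance
#   unbundler = getunbundler('02', fh, 'UN')  # -> a cg2unpacker reading fh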

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
        if repo.ui.debugflag:
            repo.ui.debug("list of changesets:\n")
            for node in nodes:
                repo.ui.debug("%s\n" % hex(node))

+def makestream(repo, outgoing, version, source, fastpath=False,
+               bundlecaps=None):
+    bundler = getbundler(version, repo, bundlecaps=bundlecaps)
+    return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
+
+def makechangegroup(repo, outgoing, version, source, fastpath=False,
+                    bundlecaps=None):
+    cgstream = makestream(repo, outgoing, version, source,
+                          fastpath=fastpath, bundlecaps=bundlecaps)
+    return getunbundler(version, util.chunkbuffer(cgstream), None,
+                        {'clcount': len(outgoing.missing) })
+
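# Editorial sketch (not part of this change): makestream() yields raw
# changegroup chunks, while makechangegroup() wraps the same stream in an
# unpacker so callers can apply it locally. Assuming `outgoing` is a
# discovery.outgoing instance:
#
#   for chunk in makestream(repo, outgoing, '02', 'push'):
#       ...                                  # raw bytes, e.g. for a bundle
#   cg = makechangegroup(repo, outgoing, '02', 'push')  # -> cg2unpacker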
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

-def getsubset(repo, outgoing, bundler, source, fastpath=False):
-    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
-    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
-                        {'clcount': len(outgoing.missing)})
-
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
-    bundler = getbundler(version, repo)
-    return getsubset(repo, outgoing, bundler, source)
+    return makechangegroup(repo, outgoing, version, source)

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
-    bundler = getbundler(version, repo, bundlecaps)
-    return getsubset(repo, outgoing, bundler, source)
+    return makechangegroup(repo, outgoing, version, source,
+                           bundlecaps=bundlecaps)

def getlocalchangegroup(repo, *args, **kwargs):
    repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
                       '4.3')
    return getchangegroup(repo, *args, **kwargs)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,2017 +1,2013 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
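# Illustrative results (editorial; assumes the stock compression engine
# names, following the parsing rules above):
#
#   parsebundlespec(repo, 'gzip-v2')           -> ('GZ', '02', {})
#   parsebundlespec(repo, 'none-packed1')      -> ('UN', 's1', {})
#   parsebundlespec(repo, 'v2', strict=False)  -> ('BZ', '02', {})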

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
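# Editorial note on the dispatch above: the 4-byte magic is, in practice,
# 'HG10' followed by a 2-byte compression id (e.g. 'UN', 'BZ', 'GZ') for
# changegroup-v1 bundles, 'HG20' for bundle2, and 'HGS1' for stream clone
# bundles.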

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')
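# Editorial example (derived from the logic above): with the following in a
# test hgrc, exchanges between bundle2-capable peers are forced back onto
# bundle1:
#
#   [devel]
#   legacy.exchange = bundle1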

class pushoperation(object):
288 """A object that represent a single push operation
288 """A object that represent a single push operation
289
289
290 Its purpose is to carry push related state and very common operations.
290 Its purpose is to carry push related state and very common operations.
291
291
292 A new pushoperation should be created at the beginning of each push and
292 A new pushoperation should be created at the beginning of each push and
293 discarded afterward.
293 discarded afterward.
294 """
294 """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
487 """decorator for function performing discovery before push
487 """decorator for function performing discovery before push
488
488
489 The function is added to the step -> function mapping and appended to the
489 The function is added to the step -> function mapping and appended to the
490 list of steps. Beware that decorated function will be added in order (this
490 list of steps. Beware that decorated function will be added in order (this
491 may matter).
491 may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
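# Editorial sketch of a hypothetical extension registering its own discovery
# step via the decorator above (the step name and body are illustrative only):
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.ui.debug('discovering mydata to push\n')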

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
510 """discover the changeset that need to be pushed"""
510 """discover the changeset that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
523 """discover the phase that needs to be pushed
523 """discover the phase that needs to be pushed
524
524
525 (computed for both success and failure case for changesets push)"""
525 (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # we may be in the issue 3871 case!
        # We drop the phase synchronisation that is normally done as a
        # courtesy, which would publish, on the remote, changesets that are
        # possibly still draft locally.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly roots;
    # XXX we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for a publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
563 fdroots = list(unfi.set('roots(%ln + %ln::)',
563 fdroots = list(unfi.set('roots(%ln + %ln::)',
564 outgoing.missing, droots))
564 outgoing.missing, droots))
565 fdroots = [f.node() for f in fdroots]
565 fdroots = [f.node() for f in fdroots]
566 future = list(unfi.set(revset, fdroots, pushop.futureheads))
566 future = list(unfi.set(revset, fdroots, pushop.futureheads))
567 pushop.outdatedphases = future
567 pushop.outdatedphases = future
568 pushop.fallbackoutdatedphases = fallback
568 pushop.fallbackoutdatedphases = fallback
569
569
570 @pushdiscovery('obsmarker')
570 @pushdiscovery('obsmarker')
571 def _pushdiscoveryobsmarkers(pushop):
571 def _pushdiscoveryobsmarkers(pushop):
572 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
572 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
573 and pushop.repo.obsstore
573 and pushop.repo.obsstore
574 and 'obsolete' in pushop.remote.listkeys('namespaces')):
574 and 'obsolete' in pushop.remote.listkeys('namespaces')):
575 repo = pushop.repo
575 repo = pushop.repo
576 # very naive computation, that can be quite expensive on big repo.
576 # very naive computation, that can be quite expensive on big repo.
577 # However: evolution is currently slow on them anyway.
577 # However: evolution is currently slow on them anyway.
578 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
578 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
579 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
579 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
580
580
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty --> no obsolete markers,
        # so we can skip the iteration entirely
        if unfi.obsstore:
            # these messages are assigned here to stay within the
            # 80 char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are pushing and there is at least one obsolete or
            # unstable changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking heads only
            # is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

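# Illustrative sketch (not part of the original file; the part name and
# function are hypothetical): an extension would register an extra part
# generator like this, using idx to control where it runs in the order:
#
#     @b2partsgenerator('myext:mypart', idx=0)
#     def _pushb2mypart(pushop, bundler):
#         if 'mypart' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('mypart')
#         bundler.newpart('myext:mypart', data='...')
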
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing,
                                            version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

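# Illustrative note (a sketch, not part of the original file): with the
# experimental pushvars feature, a client ships such variables to
# server-side hooks, e.g.:
#
#     $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# Each KEY=VALUE pair becomes a non-mandatory parameter on the 'pushvars'
# part built above.
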
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
-        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
-        cg = changegroup.getsubset(pushop.repo,
-                                   outgoing,
-                                   bundler,
-                                   'push',
-                                   fastpath=True)
+        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
+                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
                                        bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None means "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

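# Illustrative sketch (not part of the original file): pull() below drives
# this manager roughly as follows; the transaction is created lazily on
# first use and always released at the end:
#
#     trmanager = transactionmanager(repo, 'pull', remote.url())
#     try:
#         tr = trmanager.transaction()   # opened on demand
#         ...                            # apply incoming data under tr
#         trmanager.close()              # commit if one was opened
#     finally:
#         trmanager.release()            # abort if still open
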
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    return pullop

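# Illustrative usage (a sketch, not part of the original file; the URL is a
# placeholder):
#
#     from mercurial import exchange, hg
#     other = hg.peer(repo, {}, 'https://example.com/repo')
#     pullop = exchange.pull(repo, other)   # full pull, default options
#     if pullop.cgresult:
#         repo.ui.status('new changesets pulled\n')
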
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

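# Illustrative sketch (not part of the original file; the step name and
# function are hypothetical): an extension could register an extra
# discovery step like this:
#
#     @pulldiscovery('myext:extradata')
#     def _pulldiscoveryextradata(pullop):
#         pullop.repo.ui.debug('discovering extra data needs\n')
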
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it would end up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phases']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # If the bundle had a phase-heads part, then phase exchange is already done
    if op.records['phase-heads']:
        pullop.stepsdone.add('phases')

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, and so we don't break future useful
    # rollback calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

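# Illustrative note (an assumption about the wire format, not taken from this
# file): the pushkey 'phases' namespace maps string keys to string values,
# e.g. {'publishing': 'True'} for a publishing server, or draft roots encoded
# as {<hex node>: '1', ...} for a non-publishing one; analyzeremotephases
# above converts that mapping into the public heads used for this pull.
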
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

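# Illustrative note (an assumption about the data shape): pullop.remotebookmarks
# as received over the wire maps bookmark names to hex nodes, e.g.
# {'@': '<40 hex digits>'}; unhexlifybookmarks above converts the values to
# binary nodes before updatefromremote compares them with the local marks.
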
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

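# Illustrative note (the values are an assumption, not taken from a real
# server): the returned set looks roughly like
# {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'} -- the plain 'HG20' marker
# plus the repo's bundle2 capabilities blob, URL-quoted so it survives being
# passed around as a bundlecaps string.
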
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, attach to the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

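# Illustrative sketch (hypothetical step, not part of the upstream module):
# registering a new getbundle part generator is just a matter of decorating a
# function with the same signature as the generators below. The step name
# 'mystep' and the part name 'x-myext-data' are invented for the example.
#
#     @getbundle2partsgenerator('mystep')
#     def _getbundlemystep(bundler, repo, source, bundlecaps=None,
#                          b2caps=None, **kwargs):
#         bundler.newpart('x-myext-data', data='payload', mandatory=False)
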
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        bundler = changegroup.getbundler('01', repo, bundlecaps)
        return changegroup.getsubsetraw(repo, outgoing, bundler, source)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

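# Illustrative note (capability values are an assumption): the version
# negotiation above picks the highest changegroup version both sides
# understand. With a client advertising b2caps = {'changegroup': ['01', '02']}
# and a server whose supportedoutgoingversions() is {'01', '02', '03'}, the
# intersection is ['01', '02'] and max() selects '02'.
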
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def _getbookmarks(repo, **kwargs):
    """Returns bookmark to node mapping.

    This function is primarily used to generate the `bookmarks` bundle2 part.
    It is a separate function in order to make it easy to wrap it
    in extensions. Passing `kwargs` to the function makes it easy to
    add new parameters in extensions.
    """

    return dict(bookmod.listbinbookmarks(repo))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

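# Illustrative sketch of the client side (an assumption that the pushing peer
# mirrors the hashing above; 'remoteheads' is a hypothetical name): before
# uploading, a client can capture the heads it based its bundle on and send
# them in hashed form, so the server-side check works for any number of heads.
#
#     import hashlib
#     expectedheads = ['hashed',
#                      hashlib.sha1(''.join(sorted(remoteheads))).digest()]
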
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

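# Illustrative note (the URL and attribute values are invented for the
# example): a clone bundles manifest is one entry per line -- a URL followed
# by space-separated key=value attributes, e.g.
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# which the parser above turns into
# {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#  'REQUIRESNI': 'true', 'COMPRESSION': 'gzip', 'VERSION': 'v2'}.
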
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

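# Illustrative note (the preference values are an example, not a
# recommendation): ui.clonebundleprefers is a list of KEY=VALUE pairs matched
# against manifest attributes in order, e.g.
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# With that config, entries whose BUNDLESPEC parsed to VERSION 'v2' sort
# before others, with ties broken by COMPRESSION and then by manifest order.
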
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False