##// END OF EJS Templates
configitems: register the 'bundle.reorder' config
marmoute -
r33180:9f95f0bb default
parent child Browse files
Show More
@@ -1,1007 +1,1007 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
# struct formats for the per-revision delta headers, one per changegroup
# version:
#   v1: node, p1, p2, linknode (changeset)     -- delta base is implicit
#   v2: node, p1, p2, deltabase, linknode      -- explicit delta base
#   v3: as v2 plus a 16-bit revlog flags field, explicitly big-endian
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # A short read means the peer hung up or the bundle is truncated.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
44
44
def getchunk(stream):
    """return the next chunk from stream as a string"""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    # The advertised length includes the 4-byte header itself.
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # lengths 1..4 (or negative) cannot frame a valid chunk
        raise error.Abort(_("invalid chunk length %d") % length)
    # zero length marks the end of a chunk group
    return ""
54
54
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # on-the-wire length counts the 4 header bytes as well
    framed = 4 + length
    return struct.pack(">l", framed)
58
58
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero-length header is the canonical end-of-group marker
    return struct.pack(">l", 0)
62
62
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Increase default buffer size because default is usually
            # small (4k is common on Linux).
            fh = open(filename, "wb", 131072)
        # Until the last chunk is written, a failure must remove the file.
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
96
96
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # struct format (and precomputed size) of each per-revision delta header
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    # wire protocol version string for this format
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests
119
119
120 def __init__(self, fh, alg, extras=None):
120 def __init__(self, fh, alg, extras=None):
121 if alg is None:
121 if alg is None:
122 alg = 'UN'
122 alg = 'UN'
123 if alg not in util.compengines.supportedbundletypes:
123 if alg not in util.compengines.supportedbundletypes:
124 raise error.Abort(_('unknown stream compression type: %s')
124 raise error.Abort(_('unknown stream compression type: %s')
125 % alg)
125 % alg)
126 if alg == 'BZ':
126 if alg == 'BZ':
127 alg = '_truncatedBZ'
127 alg = '_truncatedBZ'
128
128
129 compengine = util.compengines.forbundletype(alg)
129 compengine = util.compengines.forbundletype(alg)
130 self._stream = compengine.decompressorreader(fh)
130 self._stream = compengine.decompressorreader(fh)
131 self._type = alg
131 self._type = alg
132 self.extras = extras or {}
132 self.extras = extras or {}
133 self.callback = None
133 self.callback = None
134
134
135 # These methods (compressed, read, seek, tell) all appear to only
135 # These methods (compressed, read, seek, tell) all appear to only
136 # be used by bundlerepo, but it's a little hard to tell.
136 # be used by bundlerepo, but it's a little hard to tell.
137 def compressed(self):
137 def compressed(self):
138 return self._type is not None and self._type != 'UN'
138 return self._type is not None and self._type != 'UN'
139 def read(self, l):
139 def read(self, l):
140 return self._stream.read(l)
140 return self._stream.read(l)
141 def seek(self, pos):
141 def seek(self, pos):
142 return self._stream.seek(pos)
142 return self._stream.seek(pos)
143 def tell(self):
143 def tell(self):
144 return self._stream.tell()
144 return self._stream.tell()
145 def close(self):
145 def close(self):
146 return self._stream.close()
146 return self._stream.close()
147
147
148 def _chunklength(self):
148 def _chunklength(self):
149 d = readexactly(self._stream, 4)
149 d = readexactly(self._stream, 4)
150 l = struct.unpack(">l", d)[0]
150 l = struct.unpack(">l", d)[0]
151 if l <= 4:
151 if l <= 4:
152 if l:
152 if l:
153 raise error.Abort(_("invalid chunk length %d") % l)
153 raise error.Abort(_("invalid chunk length %d") % l)
154 return 0
154 return 0
155 if self.callback:
155 if self.callback:
156 self.callback()
156 self.callback()
157 return l - 4
157 return l - 4
158
158
    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        # cg1 streams begin directly with changelog delta chunks
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}
166
166
167 def filelogheader(self):
167 def filelogheader(self):
168 """return the header of the filelogs chunk, v10 only has the filename"""
168 """return the header of the filelogs chunk, v10 only has the filename"""
169 l = self._chunklength()
169 l = self._chunklength()
170 if not l:
170 if not l:
171 return {}
171 return {}
172 fname = readexactly(self._stream, l)
172 fname = readexactly(self._stream, l)
173 return {'filename': fname}
173 return {'filename': fname}
174
174
175 def _deltaheader(self, headertuple, prevnode):
175 def _deltaheader(self, headertuple, prevnode):
176 node, p1, p2, cs = headertuple
176 node, p1, p2, cs = headertuple
177 if prevnode is None:
177 if prevnode is None:
178 deltabase = p1
178 deltabase = p1
179 else:
179 else:
180 deltabase = prevnode
180 deltabase = prevnode
181 flags = 0
181 flags = 0
182 return node, p1, p2, deltabase, cs, flags
182 return node, p1, p2, deltabase, cs, flags
183
183
184 def deltachunk(self, prevnode):
184 def deltachunk(self, prevnode):
185 l = self._chunklength()
185 l = self._chunklength()
186 if not l:
186 if not l:
187 return {}
187 return {}
188 headerdata = readexactly(self._stream, self.deltaheadersize)
188 headerdata = readexactly(self._stream, self.deltaheadersize)
189 header = struct.unpack(self.deltaheader, headerdata)
189 header = struct.unpack(self.deltaheader, headerdata)
190 delta = readexactly(self._stream, l - self.deltaheadersize)
190 delta = readexactly(self._stream, l - self.deltaheadersize)
191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
194
194
    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # the first two groups (changelog, manifests) may be
                    # empty without counting toward the terminator
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                # re-frame the chunk for the consumer...
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    # ...emitting the payload in 1MB slices
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
226
226
    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        """Consume the manifest group and add it to the repo's manifest log."""
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        # clear the progress bar and detach the chunk callback
        repo.ui.progress(_('manifests'), None)
        self.callback = None
239
239
    def apply(self, repo, tr, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # NOTE(review): the code actually returns a (ret, added) tuple, not
        # the bare integer the docstring above describes -- confirm callers.
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup', throw=True, **tr.hookargs)

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # per-chunk progress reporter, shared with _unpackmanifests
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            # collect the set of files touched by incoming changesets
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not (cgnodes or emptyok):
                raise error.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate', default=False):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            # compute the head-count delta, ignoring new closed heads
            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                # expose the first/last incoming node to the pretxn hook,
                # without clobbering values already set on the transaction
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup', throw=True, **hookargs)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    phases.advanceboundary(repo, tr, phases.public, cgnodes)
                else:
                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    phases.advanceboundary(repo, tr, phases.draft, cgnodes)
                    phases.retractboundary(repo, tr, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(repo, tr, targetphase, added)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **hookargs)

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **args)

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                # defer hook execution until the transaction closes
                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret, added
414
414
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 headers carry an explicit delta base, so prevnode is unused;
        # cg2 still has no revlog flags (always 0).
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
430
430
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers already contain every field, including revlog flags
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # consume the root manifest group exactly like cg1/cg2...
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # ...then any directory manifest groups, until an empty header
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
457
457
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Reads are served from the buffered header h first, then fall through
    to the underlying stream fh.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        # serve from the buffered header, topping up from the stream
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            d += readexactly(self._fh, n - len(d))
        return d
469
469
class cg1packer(object):
    # struct format of the per-revision delta header this packer emits
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    # wire protocol version string for this format
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # (the default is now registered with the config system, so no
        # inline fallback value is passed here)
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            # None means "reorder only for generaldelta revlogs";
            # see _sortgroup below
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            # note-level output only in plain verbose mode
            self._verbosenote = lambda s: None
498
498
499 def close(self):
499 def close(self):
500 return closechunk()
500 return closechunk()
501
501
502 def fileheader(self, fname):
502 def fileheader(self, fname):
503 return chunkheader(len(fname)) + fname
503 return chunkheader(len(fname)) + fname
504
504
505 # Extracted both for clarity and for overriding in extensions.
505 # Extracted both for clarity and for overriding in extensions.
506 def _sortgroup(self, revlog, nodelist, lookup):
506 def _sortgroup(self, revlog, nodelist, lookup):
507 """Sort nodes for change group and turn them into revnums."""
507 """Sort nodes for change group and turn them into revnums."""
508 # for generaldelta revlogs, we linearize the revs; this will both be
508 # for generaldelta revlogs, we linearize the revs; this will both be
509 # much quicker and generate a much smaller bundle
509 # much quicker and generate a much smaller bundle
510 if (revlog._generaldelta and self._reorder is None) or self._reorder:
510 if (revlog._generaldelta and self._reorder is None) or self._reorder:
511 dag = dagutil.revlogdag(revlog)
511 dag = dagutil.revlogdag(revlog)
512 return dag.linearize(set(revlog.rev(n) for n in nodelist))
512 return dag.linearize(set(revlog.rev(n) for n in nodelist))
513 else:
513 else:
514 return sorted([revlog.rev(n) for n in nodelist])
514 return sorted([revlog.rev(n) for n in nodelist])
515
515
516 def group(self, nodelist, revlog, lookup, units=None):
516 def group(self, nodelist, revlog, lookup, units=None):
517 """Calculate a delta group, yielding a sequence of changegroup chunks
517 """Calculate a delta group, yielding a sequence of changegroup chunks
518 (strings).
518 (strings).
519
519
520 Given a list of changeset revs, return a set of deltas and
520 Given a list of changeset revs, return a set of deltas and
521 metadata corresponding to nodes. The first delta is
521 metadata corresponding to nodes. The first delta is
522 first parent(nodelist[0]) -> nodelist[0], the receiver is
522 first parent(nodelist[0]) -> nodelist[0], the receiver is
523 guaranteed to have this parent as it has all history before
523 guaranteed to have this parent as it has all history before
524 these changesets. In the case firstparent is nullrev the
524 these changesets. In the case firstparent is nullrev the
525 changegroup starts with a full revision.
525 changegroup starts with a full revision.
526
526
527 If units is not None, progress detail will be generated, units specifies
527 If units is not None, progress detail will be generated, units specifies
528 the type of revlog that is touched (changelog, manifest, etc.).
528 the type of revlog that is touched (changelog, manifest, etc.).
529 """
529 """
530 # if we don't have any revisions touched by these changesets, bail
530 # if we don't have any revisions touched by these changesets, bail
531 if len(nodelist) == 0:
531 if len(nodelist) == 0:
532 yield self.close()
532 yield self.close()
533 return
533 return
534
534
535 revs = self._sortgroup(revlog, nodelist, lookup)
535 revs = self._sortgroup(revlog, nodelist, lookup)
536
536
537 # add the parent of the first rev
537 # add the parent of the first rev
538 p = revlog.parentrevs(revs[0])[0]
538 p = revlog.parentrevs(revs[0])[0]
539 revs.insert(0, p)
539 revs.insert(0, p)
540
540
541 # build deltas
541 # build deltas
542 total = len(revs) - 1
542 total = len(revs) - 1
543 msgbundling = _('bundling')
543 msgbundling = _('bundling')
544 for r in xrange(len(revs) - 1):
544 for r in xrange(len(revs) - 1):
545 if units is not None:
545 if units is not None:
546 self._progress(msgbundling, r + 1, unit=units, total=total)
546 self._progress(msgbundling, r + 1, unit=units, total=total)
547 prev, curr = revs[r], revs[r + 1]
547 prev, curr = revs[r], revs[r + 1]
548 linknode = lookup(revlog.node(curr))
548 linknode = lookup(revlog.node(curr))
549 for c in self.revchunk(revlog, curr, prev, linknode):
549 for c in self.revchunk(revlog, curr, prev, linknode):
550 yield c
550 yield c
551
551
552 if units is not None:
552 if units is not None:
553 self._progress(msgbundling, None)
553 self._progress(msgbundling, None)
554 yield self.close()
554 yield self.close()
555
555
556 # filter any nodes that claim to be part of the known set
556 # filter any nodes that claim to be part of the known set
557 def prune(self, revlog, missing, commonrevs):
557 def prune(self, revlog, missing, commonrevs):
558 rr, rl = revlog.rev, revlog.linkrev
558 rr, rl = revlog.rev, revlog.linkrev
559 return [n for n in missing if rl(rr(n)) not in commonrevs]
559 return [n for n in missing if rl(rr(n)) not in commonrevs]
560
560
561 def _packmanifests(self, dir, mfnodes, lookuplinknode):
561 def _packmanifests(self, dir, mfnodes, lookuplinknode):
562 """Pack flat manifests into a changegroup stream."""
562 """Pack flat manifests into a changegroup stream."""
563 assert not dir
563 assert not dir
564 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
564 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
565 lookuplinknode, units=_('manifests')):
565 lookuplinknode, units=_('manifests')):
566 yield chunk
566 yield chunk
567
567
568 def _manifestsdone(self):
568 def _manifestsdone(self):
569 return ''
569 return ''
570
570
571 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
571 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
572 '''yield a sequence of changegroup chunks (strings)'''
572 '''yield a sequence of changegroup chunks (strings)'''
573 repo = self._repo
573 repo = self._repo
574 cl = repo.changelog
574 cl = repo.changelog
575
575
576 clrevorder = {}
576 clrevorder = {}
577 mfs = {} # needed manifests
577 mfs = {} # needed manifests
578 fnodes = {} # needed file nodes
578 fnodes = {} # needed file nodes
579 changedfiles = set()
579 changedfiles = set()
580
580
581 # Callback for the changelog, used to collect changed files and manifest
581 # Callback for the changelog, used to collect changed files and manifest
582 # nodes.
582 # nodes.
583 # Returns the linkrev node (identity in the changelog case).
583 # Returns the linkrev node (identity in the changelog case).
584 def lookupcl(x):
584 def lookupcl(x):
585 c = cl.read(x)
585 c = cl.read(x)
586 clrevorder[x] = len(clrevorder)
586 clrevorder[x] = len(clrevorder)
587 n = c[0]
587 n = c[0]
588 # record the first changeset introducing this manifest version
588 # record the first changeset introducing this manifest version
589 mfs.setdefault(n, x)
589 mfs.setdefault(n, x)
590 # Record a complete list of potentially-changed files in
590 # Record a complete list of potentially-changed files in
591 # this manifest.
591 # this manifest.
592 changedfiles.update(c[3])
592 changedfiles.update(c[3])
593 return x
593 return x
594
594
595 self._verbosenote(_('uncompressed size of bundle content:\n'))
595 self._verbosenote(_('uncompressed size of bundle content:\n'))
596 size = 0
596 size = 0
597 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
597 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
598 size += len(chunk)
598 size += len(chunk)
599 yield chunk
599 yield chunk
600 self._verbosenote(_('%8.i (changelog)\n') % size)
600 self._verbosenote(_('%8.i (changelog)\n') % size)
601
601
602 # We need to make sure that the linkrev in the changegroup refers to
602 # We need to make sure that the linkrev in the changegroup refers to
603 # the first changeset that introduced the manifest or file revision.
603 # the first changeset that introduced the manifest or file revision.
604 # The fastpath is usually safer than the slowpath, because the filelogs
604 # The fastpath is usually safer than the slowpath, because the filelogs
605 # are walked in revlog order.
605 # are walked in revlog order.
606 #
606 #
607 # When taking the slowpath with reorder=None and the manifest revlog
607 # When taking the slowpath with reorder=None and the manifest revlog
608 # uses generaldelta, the manifest may be walked in the "wrong" order.
608 # uses generaldelta, the manifest may be walked in the "wrong" order.
609 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
609 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
610 # cc0ff93d0c0c).
610 # cc0ff93d0c0c).
611 #
611 #
612 # When taking the fastpath, we are only vulnerable to reordering
612 # When taking the fastpath, we are only vulnerable to reordering
613 # of the changelog itself. The changelog never uses generaldelta, so
613 # of the changelog itself. The changelog never uses generaldelta, so
614 # it is only reordered when reorder=True. To handle this case, we
614 # it is only reordered when reorder=True. To handle this case, we
615 # simply take the slowpath, which already has the 'clrevorder' logic.
615 # simply take the slowpath, which already has the 'clrevorder' logic.
616 # This was also fixed in cc0ff93d0c0c.
616 # This was also fixed in cc0ff93d0c0c.
617 fastpathlinkrev = fastpathlinkrev and not self._reorder
617 fastpathlinkrev = fastpathlinkrev and not self._reorder
618 # Treemanifests don't work correctly with fastpathlinkrev
618 # Treemanifests don't work correctly with fastpathlinkrev
619 # either, because we don't discover which directory nodes to
619 # either, because we don't discover which directory nodes to
620 # send along with files. This could probably be fixed.
620 # send along with files. This could probably be fixed.
621 fastpathlinkrev = fastpathlinkrev and (
621 fastpathlinkrev = fastpathlinkrev and (
622 'treemanifest' not in repo.requirements)
622 'treemanifest' not in repo.requirements)
623
623
624 for chunk in self.generatemanifests(commonrevs, clrevorder,
624 for chunk in self.generatemanifests(commonrevs, clrevorder,
625 fastpathlinkrev, mfs, fnodes):
625 fastpathlinkrev, mfs, fnodes):
626 yield chunk
626 yield chunk
627 mfs.clear()
627 mfs.clear()
628 clrevs = set(cl.rev(x) for x in clnodes)
628 clrevs = set(cl.rev(x) for x in clnodes)
629
629
630 if not fastpathlinkrev:
630 if not fastpathlinkrev:
631 def linknodes(unused, fname):
631 def linknodes(unused, fname):
632 return fnodes.get(fname, {})
632 return fnodes.get(fname, {})
633 else:
633 else:
634 cln = cl.node
634 cln = cl.node
635 def linknodes(filerevlog, fname):
635 def linknodes(filerevlog, fname):
636 llr = filerevlog.linkrev
636 llr = filerevlog.linkrev
637 fln = filerevlog.node
637 fln = filerevlog.node
638 revs = ((r, llr(r)) for r in filerevlog)
638 revs = ((r, llr(r)) for r in filerevlog)
639 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
639 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
640
640
641 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
641 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
642 source):
642 source):
643 yield chunk
643 yield chunk
644
644
645 yield self.close()
645 yield self.close()
646
646
647 if clnodes:
647 if clnodes:
648 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
648 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
649
649
650 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
650 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
651 fnodes):
651 fnodes):
652 repo = self._repo
652 repo = self._repo
653 mfl = repo.manifestlog
653 mfl = repo.manifestlog
654 dirlog = mfl._revlog.dirlog
654 dirlog = mfl._revlog.dirlog
655 tmfnodes = {'': mfs}
655 tmfnodes = {'': mfs}
656
656
657 # Callback for the manifest, used to collect linkrevs for filelog
657 # Callback for the manifest, used to collect linkrevs for filelog
658 # revisions.
658 # revisions.
659 # Returns the linkrev node (collected in lookupcl).
659 # Returns the linkrev node (collected in lookupcl).
660 def makelookupmflinknode(dir):
660 def makelookupmflinknode(dir):
661 if fastpathlinkrev:
661 if fastpathlinkrev:
662 assert not dir
662 assert not dir
663 return mfs.__getitem__
663 return mfs.__getitem__
664
664
665 def lookupmflinknode(x):
665 def lookupmflinknode(x):
666 """Callback for looking up the linknode for manifests.
666 """Callback for looking up the linknode for manifests.
667
667
668 Returns the linkrev node for the specified manifest.
668 Returns the linkrev node for the specified manifest.
669
669
670 SIDE EFFECT:
670 SIDE EFFECT:
671
671
672 1) fclnodes gets populated with the list of relevant
672 1) fclnodes gets populated with the list of relevant
673 file nodes if we're not using fastpathlinkrev
673 file nodes if we're not using fastpathlinkrev
674 2) When treemanifests are in use, collects treemanifest nodes
674 2) When treemanifests are in use, collects treemanifest nodes
675 to send
675 to send
676
676
677 Note that this means manifests must be completely sent to
677 Note that this means manifests must be completely sent to
678 the client before you can trust the list of files and
678 the client before you can trust the list of files and
679 treemanifests to send.
679 treemanifests to send.
680 """
680 """
681 clnode = tmfnodes[dir][x]
681 clnode = tmfnodes[dir][x]
682 mdata = mfl.get(dir, x).readfast(shallow=True)
682 mdata = mfl.get(dir, x).readfast(shallow=True)
683 for p, n, fl in mdata.iterentries():
683 for p, n, fl in mdata.iterentries():
684 if fl == 't': # subdirectory manifest
684 if fl == 't': # subdirectory manifest
685 subdir = dir + p + '/'
685 subdir = dir + p + '/'
686 tmfclnodes = tmfnodes.setdefault(subdir, {})
686 tmfclnodes = tmfnodes.setdefault(subdir, {})
687 tmfclnode = tmfclnodes.setdefault(n, clnode)
687 tmfclnode = tmfclnodes.setdefault(n, clnode)
688 if clrevorder[clnode] < clrevorder[tmfclnode]:
688 if clrevorder[clnode] < clrevorder[tmfclnode]:
689 tmfclnodes[n] = clnode
689 tmfclnodes[n] = clnode
690 else:
690 else:
691 f = dir + p
691 f = dir + p
692 fclnodes = fnodes.setdefault(f, {})
692 fclnodes = fnodes.setdefault(f, {})
693 fclnode = fclnodes.setdefault(n, clnode)
693 fclnode = fclnodes.setdefault(n, clnode)
694 if clrevorder[clnode] < clrevorder[fclnode]:
694 if clrevorder[clnode] < clrevorder[fclnode]:
695 fclnodes[n] = clnode
695 fclnodes[n] = clnode
696 return clnode
696 return clnode
697 return lookupmflinknode
697 return lookupmflinknode
698
698
699 size = 0
699 size = 0
700 while tmfnodes:
700 while tmfnodes:
701 dir = min(tmfnodes)
701 dir = min(tmfnodes)
702 nodes = tmfnodes[dir]
702 nodes = tmfnodes[dir]
703 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
703 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
704 if not dir or prunednodes:
704 if not dir or prunednodes:
705 for x in self._packmanifests(dir, prunednodes,
705 for x in self._packmanifests(dir, prunednodes,
706 makelookupmflinknode(dir)):
706 makelookupmflinknode(dir)):
707 size += len(x)
707 size += len(x)
708 yield x
708 yield x
709 del tmfnodes[dir]
709 del tmfnodes[dir]
710 self._verbosenote(_('%8.i (manifests)\n') % size)
710 self._verbosenote(_('%8.i (manifests)\n') % size)
711 yield self._manifestsdone()
711 yield self._manifestsdone()
712
712
713 # The 'source' parameter is useful for extensions
713 # The 'source' parameter is useful for extensions
714 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
714 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
715 repo = self._repo
715 repo = self._repo
716 progress = self._progress
716 progress = self._progress
717 msgbundling = _('bundling')
717 msgbundling = _('bundling')
718
718
719 total = len(changedfiles)
719 total = len(changedfiles)
720 # for progress output
720 # for progress output
721 msgfiles = _('files')
721 msgfiles = _('files')
722 for i, fname in enumerate(sorted(changedfiles)):
722 for i, fname in enumerate(sorted(changedfiles)):
723 filerevlog = repo.file(fname)
723 filerevlog = repo.file(fname)
724 if not filerevlog:
724 if not filerevlog:
725 raise error.Abort(_("empty or missing revlog for %s") % fname)
725 raise error.Abort(_("empty or missing revlog for %s") % fname)
726
726
727 linkrevnodes = linknodes(filerevlog, fname)
727 linkrevnodes = linknodes(filerevlog, fname)
728 # Lookup for filenodes, we collected the linkrev nodes above in the
728 # Lookup for filenodes, we collected the linkrev nodes above in the
729 # fastpath case and with lookupmf in the slowpath case.
729 # fastpath case and with lookupmf in the slowpath case.
730 def lookupfilelog(x):
730 def lookupfilelog(x):
731 return linkrevnodes[x]
731 return linkrevnodes[x]
732
732
733 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
733 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
734 if filenodes:
734 if filenodes:
735 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
735 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
736 total=total)
736 total=total)
737 h = self.fileheader(fname)
737 h = self.fileheader(fname)
738 size = len(h)
738 size = len(h)
739 yield h
739 yield h
740 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
740 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
741 size += len(chunk)
741 size += len(chunk)
742 yield chunk
742 yield chunk
743 self._verbosenote(_('%8.i %s\n') % (size, fname))
743 self._verbosenote(_('%8.i %s\n') % (size, fname))
744 progress(msgbundling, None)
744 progress(msgbundling, None)
745
745
746 def deltaparent(self, revlog, rev, p1, p2, prev):
746 def deltaparent(self, revlog, rev, p1, p2, prev):
747 return prev
747 return prev
748
748
749 def revchunk(self, revlog, rev, prev, linknode):
749 def revchunk(self, revlog, rev, prev, linknode):
750 node = revlog.node(rev)
750 node = revlog.node(rev)
751 p1, p2 = revlog.parentrevs(rev)
751 p1, p2 = revlog.parentrevs(rev)
752 base = self.deltaparent(revlog, rev, p1, p2, prev)
752 base = self.deltaparent(revlog, rev, p1, p2, prev)
753
753
754 prefix = ''
754 prefix = ''
755 if revlog.iscensored(base) or revlog.iscensored(rev):
755 if revlog.iscensored(base) or revlog.iscensored(rev):
756 try:
756 try:
757 delta = revlog.revision(node, raw=True)
757 delta = revlog.revision(node, raw=True)
758 except error.CensoredNodeError as e:
758 except error.CensoredNodeError as e:
759 delta = e.tombstone
759 delta = e.tombstone
760 if base == nullrev:
760 if base == nullrev:
761 prefix = mdiff.trivialdiffheader(len(delta))
761 prefix = mdiff.trivialdiffheader(len(delta))
762 else:
762 else:
763 baselen = revlog.rawsize(base)
763 baselen = revlog.rawsize(base)
764 prefix = mdiff.replacediffheader(baselen, len(delta))
764 prefix = mdiff.replacediffheader(baselen, len(delta))
765 elif base == nullrev:
765 elif base == nullrev:
766 delta = revlog.revision(node, raw=True)
766 delta = revlog.revision(node, raw=True)
767 prefix = mdiff.trivialdiffheader(len(delta))
767 prefix = mdiff.trivialdiffheader(len(delta))
768 else:
768 else:
769 delta = revlog.revdiff(base, rev)
769 delta = revlog.revdiff(base, rev)
770 p1n, p2n = revlog.parents(node)
770 p1n, p2n = revlog.parents(node)
771 basenode = revlog.node(base)
771 basenode = revlog.node(base)
772 flags = revlog.flags(rev)
772 flags = revlog.flags(rev)
773 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
773 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
774 meta += prefix
774 meta += prefix
775 l = len(meta) + len(delta)
775 l = len(meta) + len(delta)
776 yield chunkheader(l)
776 yield chunkheader(l)
777 yield meta
777 yield meta
778 yield delta
778 yield delta
779 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
779 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
780 # do nothing with basenode, it is implicitly the previous one in HG10
780 # do nothing with basenode, it is implicitly the previous one in HG10
781 # do nothing with flags, it is implicitly 0 for cg1 and cg2
781 # do nothing with flags, it is implicitly 0 for cg1 and cg2
782 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
782 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
783
783
class cg2packer(cg1packer):
    """Packer producing version '02' changegroups.

    Extends cg1 by transmitting an explicit delta base node in each
    chunk header, which lets generaldelta storage deltas be reused.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        # Since generaldelta is directly supported by cg2, reordering
        # generally doesn't help, so we disable it by default (treating
        # bundle.reorder=auto just like bundle.reorder=False).
        if self._reorder is None:
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Choose the delta base to transmit for 'rev'."""
        storagebase = revlog.deltaparent(rev)
        if storagebase == nullrev:
            if revlog.storedeltachains:
                # Avoid sending full revisions when delta parent is null. Pick
                # prev in that case. It's tempting to pick p1 in this case, as
                # p1 will be smaller in the common case. However, computing a
                # delta against p1 may require resolving the raw text of p1,
                # which could be expensive. The revlog caches should have prev
                # cached, meaning less CPU for changegroup generation. There is
                # likely room to add a flag and/or config option to control
                # this behavior.
                return prev
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        if storagebase in (p1, p2, prev):
            # safe to reuse the storage delta: remote is known to have the base
            return storagebase
        # Pick prev when we can't be sure remote has the base revision.
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        fields = (node, p1n, p2n, basenode, linknode)
        return struct.pack(self.deltaheader, *fields)
820
820
class cg3packer(cg2packer):
    """Packer producing version '03' changegroups.

    Extends cg2 by carrying revlog flags in the delta header and by
    supporting per-directory (treemanifest) manifest groups.
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # A subdirectory manifest group is introduced by a file-style
        # header carrying the directory path; the root manifest has none.
        if dir:
            yield self.fileheader(dir)

        revlog = self._repo.manifestlog._revlog.dirlog(dir)
        chunks = self.group(mfnodes, revlog, lookuplinknode,
                            units=_('manifests'))
        for chunk in chunks:
            yield chunk

    def _manifestsdone(self):
        # cg3 separates manifests from files with an explicit close chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # unlike cg1/cg2, cg3 transmits the revlog flags
        return struct.pack(self.deltaheader,
                           node, p1n, p2n, basenode, linknode, flags)
840
840
# Map of changegroup version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
847
847
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo supports at all."""
    versions = set(_packermap)
    # '03' is only available when one of the experimental knobs is on or
    # the repo already requires treemanifest support.
    cg3allowed = (repo.ui.configbool('experimental', 'changegroup3')
                  or repo.ui.configbool('experimental', 'treemanifest')
                  or 'treemanifest' in repo.requirements)
    if not cg3allowed:
        versions.discard('03')
    return versions
855
855
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    # Currently identical to the full supported set: every version we
    # know about can also be applied locally.
    return allsupportedversions(repo)
859
859
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo is able to generate."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update(['01', '02'])
    return versions
872
872
def safeversion(repo):
    """Find the smallest version safe to assume clients will support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    candidates = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        candidates = candidates - set(['01'])
    assert candidates
    return min(candidates)
882
882
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer registered for 'version'."""
    assert version in supportedoutgoingversions(repo)
    packerclass = _packermap[version][0]
    return packerclass(repo, bundlecaps)
886
886
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker registered for 'version' over stream 'fh'."""
    unpackerclass = _packermap[version][1]
    return unpackerclass(fh, alg, extras=extras)
889
889
def _changegroupinfo(repo, nodes, source):
    """Report the number of changesets bundled (and list them when debugging)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
897
897
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Produce a raw changegroup chunk generator for an outgoing set.

    Fires the 'preoutgoing' hook before generation starts.
    """
    repo = repo.unfiltered()
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    if not fastpath:
        fastpath = (repo.filtername is None
                    and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(outgoing.common, csets, fastpath, source)
913
913
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw(), but wrap the stream in an unbundler object."""
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    stream = util.chunkbuffer(gengroup)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(bundler.version, stream, None, extras)
918
918
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots,
                                  missingheads=heads)
    return getsubset(repo, outgoing, getbundler(version, repo), source)
935
935
936 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
936 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
937 version='01'):
937 version='01'):
938 """Like getbundle, but taking a discovery.outgoing as an argument.
938 """Like getbundle, but taking a discovery.outgoing as an argument.
939
939
940 This is only implemented for local repos and reuses potentially
940 This is only implemented for local repos and reuses potentially
941 precomputed sets in outgoing. Returns a raw changegroup generator."""
941 precomputed sets in outgoing. Returns a raw changegroup generator."""
942 if not outgoing.missing:
942 if not outgoing.missing:
943 return None
943 return None
944 bundler = getbundler(version, repo, bundlecaps)
944 bundler = getbundler(version, repo, bundlecaps)
945 return getsubsetraw(repo, outgoing, bundler, source)
945 return getsubsetraw(repo, outgoing, bundler, source)
946
946
947 def getchangegroup(repo, source, outgoing, bundlecaps=None,
947 def getchangegroup(repo, source, outgoing, bundlecaps=None,
948 version='01'):
948 version='01'):
949 """Like getbundle, but taking a discovery.outgoing as an argument.
949 """Like getbundle, but taking a discovery.outgoing as an argument.
950
950
951 This is only implemented for local repos and reuses potentially
951 This is only implemented for local repos and reuses potentially
952 precomputed sets in outgoing."""
952 precomputed sets in outgoing."""
953 if not outgoing.missing:
953 if not outgoing.missing:
954 return None
954 return None
955 bundler = getbundler(version, repo, bundlecaps)
955 bundler = getbundler(version, repo, bundlecaps)
956 return getsubset(repo, outgoing, bundler, source)
956 return getsubset(repo, outgoing, bundler, source)
957
957
958 def getlocalchangegroup(repo, *args, **kwargs):
958 def getlocalchangegroup(repo, *args, **kwargs):
959 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
959 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
960 '4.3')
960 '4.3')
961 return getchangegroup(repo, *args, **kwargs)
961 return getchangegroup(repo, *args, **kwargs)
962
962
963 def changegroup(repo, basenodes, source):
963 def changegroup(repo, basenodes, source):
964 # to avoid a race we use changegroupsubset() (issue1320)
964 # to avoid a race we use changegroupsubset() (issue1320)
965 return changegroupsubset(repo, basenodes, repo.heads(), source)
965 return changegroupsubset(repo, basenodes, repo.heads(), source)
966
966
967 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
967 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
968 revisions = 0
968 revisions = 0
969 files = 0
969 files = 0
970 for chunkdata in iter(source.filelogheader, {}):
970 for chunkdata in iter(source.filelogheader, {}):
971 files += 1
971 files += 1
972 f = chunkdata["filename"]
972 f = chunkdata["filename"]
973 repo.ui.debug("adding %s revisions\n" % f)
973 repo.ui.debug("adding %s revisions\n" % f)
974 repo.ui.progress(_('files'), files, unit=_('files'),
974 repo.ui.progress(_('files'), files, unit=_('files'),
975 total=expectedfiles)
975 total=expectedfiles)
976 fl = repo.file(f)
976 fl = repo.file(f)
977 o = len(fl)
977 o = len(fl)
978 try:
978 try:
979 if not fl.addgroup(source, revmap, trp):
979 if not fl.addgroup(source, revmap, trp):
980 raise error.Abort(_("received file revlog group is empty"))
980 raise error.Abort(_("received file revlog group is empty"))
981 except error.CensoredBaseError as e:
981 except error.CensoredBaseError as e:
982 raise error.Abort(_("received delta base is censored: %s") % e)
982 raise error.Abort(_("received delta base is censored: %s") % e)
983 revisions += len(fl) - o
983 revisions += len(fl) - o
984 if f in needfiles:
984 if f in needfiles:
985 needs = needfiles[f]
985 needs = needfiles[f]
986 for new in xrange(o, len(fl)):
986 for new in xrange(o, len(fl)):
987 n = fl.node(new)
987 n = fl.node(new)
988 if n in needs:
988 if n in needs:
989 needs.remove(n)
989 needs.remove(n)
990 else:
990 else:
991 raise error.Abort(
991 raise error.Abort(
992 _("received spurious file revlog entry"))
992 _("received spurious file revlog entry"))
993 if not needs:
993 if not needs:
994 del needfiles[f]
994 del needfiles[f]
995 repo.ui.progress(_('files'), None)
995 repo.ui.progress(_('files'), None)
996
996
997 for f, needs in needfiles.iteritems():
997 for f, needs in needfiles.iteritems():
998 fl = repo.file(f)
998 fl = repo.file(f)
999 for n in needs:
999 for n in needs:
1000 try:
1000 try:
1001 fl.rev(n)
1001 fl.rev(n)
1002 except error.LookupError:
1002 except error.LookupError:
1003 raise error.Abort(
1003 raise error.Abort(
1004 _('missing file data for %s:%s - run hg verify') %
1004 _('missing file data for %s:%s - run hg verify') %
1005 (f, hex(n)))
1005 (f, hex(n)))
1006
1006
1007 return revisions, files
1007 return revisions, files
@@ -1,111 +1,115 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 )
14 )
15
15
16 def loadconfigtable(ui, extname, configtable):
16 def loadconfigtable(ui, extname, configtable):
17 """update config item known to the ui with the extension ones"""
17 """update config item known to the ui with the extension ones"""
18 for section, items in configtable.items():
18 for section, items in configtable.items():
19 knownitems = ui._knownconfig.setdefault(section, {})
19 knownitems = ui._knownconfig.setdefault(section, {})
20 knownkeys = set(knownitems)
20 knownkeys = set(knownitems)
21 newkeys = set(items)
21 newkeys = set(items)
22 for key in sorted(knownkeys & newkeys):
22 for key in sorted(knownkeys & newkeys):
23 msg = "extension '%s' overwrite config item '%s.%s'"
23 msg = "extension '%s' overwrite config item '%s.%s'"
24 msg %= (extname, section, key)
24 msg %= (extname, section, key)
25 ui.develwarn(msg, config='warn-config')
25 ui.develwarn(msg, config='warn-config')
26
26
27 knownitems.update(items)
27 knownitems.update(items)
28
28
29 class configitem(object):
29 class configitem(object):
30 """represent a known config item
30 """represent a known config item
31
31
32 :section: the official config section where to find this item,
32 :section: the official config section where to find this item,
33 :name: the official name within the section,
33 :name: the official name within the section,
34 :default: default value for this item,
34 :default: default value for this item,
35 """
35 """
36
36
37 def __init__(self, section, name, default=None):
37 def __init__(self, section, name, default=None):
38 self.section = section
38 self.section = section
39 self.name = name
39 self.name = name
40 self.default = default
40 self.default = default
41
41
42 coreitems = {}
42 coreitems = {}
43
43
44 def _register(configtable, *args, **kwargs):
44 def _register(configtable, *args, **kwargs):
45 item = configitem(*args, **kwargs)
45 item = configitem(*args, **kwargs)
46 section = configtable.setdefault(item.section, {})
46 section = configtable.setdefault(item.section, {})
47 if item.name in section:
47 if item.name in section:
48 msg = "duplicated config item registration for '%s.%s'"
48 msg = "duplicated config item registration for '%s.%s'"
49 raise error.ProgrammingError(msg % (item.section, item.name))
49 raise error.ProgrammingError(msg % (item.section, item.name))
50 section[item.name] = item
50 section[item.name] = item
51
51
52 # Registering actual config items
52 # Registering actual config items
53
53
54 def getitemregister(configtable):
54 def getitemregister(configtable):
55 return functools.partial(_register, configtable)
55 return functools.partial(_register, configtable)
56
56
57 coreconfigitem = getitemregister(coreitems)
57 coreconfigitem = getitemregister(coreitems)
58
58
59 coreconfigitem('auth', 'cookiefile',
59 coreconfigitem('auth', 'cookiefile',
60 default=None,
60 default=None,
61 )
61 )
62 # bookmarks.pushing: internal hack for discovery
62 # bookmarks.pushing: internal hack for discovery
63 coreconfigitem('bookmarks', 'pushing',
63 coreconfigitem('bookmarks', 'pushing',
64 default=list,
64 default=list,
65 )
65 )
66 # bundle.mainreporoot: internal hack for bundlerepo
66 # bundle.mainreporoot: internal hack for bundlerepo
67 coreconfigitem('bundle', 'mainreporoot',
67 coreconfigitem('bundle', 'mainreporoot',
68 default='',
68 default='',
69 )
69 )
70 # bundle.reorder: experimental config
71 coreconfigitem('bundle', 'reorder',
72 default='auto',
73 )
70 coreconfigitem('color', 'mode',
74 coreconfigitem('color', 'mode',
71 default='auto',
75 default='auto',
72 )
76 )
73 coreconfigitem('devel', 'all-warnings',
77 coreconfigitem('devel', 'all-warnings',
74 default=False,
78 default=False,
75 )
79 )
76 coreconfigitem('devel', 'bundle2.debug',
80 coreconfigitem('devel', 'bundle2.debug',
77 default=False,
81 default=False,
78 )
82 )
79 coreconfigitem('devel', 'check-locks',
83 coreconfigitem('devel', 'check-locks',
80 default=False,
84 default=False,
81 )
85 )
82 coreconfigitem('devel', 'check-relroot',
86 coreconfigitem('devel', 'check-relroot',
83 default=False,
87 default=False,
84 )
88 )
85 coreconfigitem('devel', 'disableloaddefaultcerts',
89 coreconfigitem('devel', 'disableloaddefaultcerts',
86 default=False,
90 default=False,
87 )
91 )
88 coreconfigitem('devel', 'servercafile',
92 coreconfigitem('devel', 'servercafile',
89 default='',
93 default='',
90 )
94 )
91 coreconfigitem('devel', 'serverexactprotocol',
95 coreconfigitem('devel', 'serverexactprotocol',
92 default='',
96 default='',
93 )
97 )
94 coreconfigitem('devel', 'serverrequirecert',
98 coreconfigitem('devel', 'serverrequirecert',
95 default=False,
99 default=False,
96 )
100 )
97 coreconfigitem('devel', 'strip-obsmarkers',
101 coreconfigitem('devel', 'strip-obsmarkers',
98 default=True,
102 default=True,
99 )
103 )
100 coreconfigitem('patch', 'fuzz',
104 coreconfigitem('patch', 'fuzz',
101 default=2,
105 default=2,
102 )
106 )
103 coreconfigitem('ui', 'clonebundleprefers',
107 coreconfigitem('ui', 'clonebundleprefers',
104 default=list,
108 default=list,
105 )
109 )
106 coreconfigitem('ui', 'interactive',
110 coreconfigitem('ui', 'interactive',
107 default=None,
111 default=None,
108 )
112 )
109 coreconfigitem('ui', 'quiet',
113 coreconfigitem('ui', 'quiet',
110 default=False,
114 default=False,
111 )
115 )
General Comments 0
You need to be logged in to leave comments. Login now