readbundle: fix typo of None compression...
Yuya Nishihara
r26272:59c410db default
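The change is a single entry in the bundletypes table below: the headerless bundle type mapped its compression to the string 'None' where the Python constant None was intended. writebundle() validates that value with "comp not in util.compressors", and that table uses the key None to mean "no compression" (see the util.compressors[None] call in the HG20 branch), so the quoted string made the headerless type abort with "unknown stream compression type: None". A minimal sketch of the difference, using a stand-in dictionary rather than Mercurial's actual util.compressors:

    # stand-in table; util.compressors is likewise keyed by None/'BZ'/'GZ'
    compressors = {None: 'identity', 'BZ': 'bzip2', 'GZ': 'zlib'}

    header, comp = ("", 'None')   # old, buggy entry: 'None' is a string
    print(comp in compressors)    # False, so writebundle() would abort

    header, comp = ("", None)     # fixed entry
    print(comp in compressors)    # True: the uncompressed stream is chosen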
mercurial/changegroup.py
@@ -1,894 +1,894 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
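
# Illustration (a sketch, not part of the original file): each chunk on the
# wire is framed by a 4-byte big-endian length that counts itself, and the
# zero-length header from closechunk() terminates a group, so a chunk
# carrying 'data' round-trips like this under Python 2:
#
#   >>> import struct
#   >>> frame = struct.pack(">l", len('data') + 4) + 'data'
#   >>> frame
#   '\x00\x00\x00\x08data'
#   >>> struct.unpack(">l", frame[:4])[0] - 4   # payload length
#   4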

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
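
# Worked example (a sketch, not part of the original file): addchangegroup
# encodes "n heads added" as n + 1 and "n heads removed" as -n - 1, so two
# results that added one and two heads combine to three added heads, while
# any zero (nothing changed) wins outright:
#
#   >>> combineresults([2, 3])
#   4
#   >>> combineresults([2, 0])
#   0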

bundletypes = {
-    "": ("", 'None'), # only when using unbundle on ssh and old http servers
+    "": ("", None), # only when using unbundle on ssh and old http servers
                      # since the unification ssh accepts a header but there
                      # is no capability signaling it.
    "HG20": (), # special-cased below
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
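
# Quick reference (a sketch, not part of the original file): writebundle()
# below unpacks each entry as (header, compression-key); the empty-string
# type is a bare, headerless, uncompressed stream:
#
#   >>> bundletypes['HG10BZ']
#   ('HG10', 'BZ')
#   >>> bundletypes['']
#   ('', None)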

def writebundle(ui, cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        if bundletype == "HG20":
            from . import bundle2
            bundle = bundle2.bundle20(ui)
            part = bundle.newpart('changegroup', data=cg.getchunks())
            part.addparam('version', cg.version)
            z = util.compressors[None]()
            chunkiter = bundle.getchunks()
        else:
            if cg.version != '01':
                raise util.Abort(_('old bundle types only support v1 '
                                   'changegroups'))
            header, comp = bundletypes[bundletype]
            fh.write(header)
            if comp not in util.compressors:
                raise util.Abort(_('unknown stream compression type: %s')
                                 % comp)
            z = util.compressors[comp]()
            chunkiter = cg.getchunks()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in chunkiter:
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
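
# Hypothetical usage (ui and cg assumed to be a ui object and a changegroup
# already in hand; a sketch, not from the file):
#
#   fname = writebundle(ui, cg, 'dump.hg', 'HG10UN')   # headered, uncompressed
#   fname = writebundle(ui, cg, None, 'HG10BZ')        # bz2, to a temp file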

class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if alg not in util.decompressors:
            raise util.Abort(_('unknown stream compression type: %s')
                             % alg)
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}
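
    # Illustration (a sketch, not part of the original file): a v1 delta
    # record carries four 20-byte nodes (node, p1, p2, linknode) and no
    # explicit delta base; the base is inferred as the previously received
    # node, or p1 for the first record in a group:
    #
    #   >>> import struct
    #   >>> struct.calcsize("20s20s20s20s")   # deltaheadersize for '01'
    #   80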

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, since otherwise
        it would block in case of sshrepo because it doesn't know the end of
        the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

class cg2unpacker(cg1unpacker):
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs
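
# Illustration (a sketch, not part of the original file): unlike v1, the v2
# header adds a fifth 20-byte field naming the delta base explicitly, so
# prevnode is ignored:
#
#   >>> import struct
#   >>> struct.calcsize("20s20s20s20s20s")   # deltaheadersize for '02'
#   100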

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case the first parent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()
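
    # Illustration (a sketch, not part of the original file): after the
    # first parent is prepended, deltas are emitted pairwise along the
    # linearized revision list, so revs [5, 6, 7] produce the (prev, curr)
    # delta pairs:
    #
    #   >>> revs = [5, 6, 7]
    #   >>> [(revs[r], revs[r + 1]) for r in xrange(len(revs) - 1)]
    #   [(5, 6), (6, 7)]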

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = ml.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
            return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        size = 0
        for chunk in self.group(mfnodes, ml, lookupmf, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in clrevs:
                            yield filerevlog.node(r), cl.node(linkrev)
                return dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

packermap = {'01': (cg1packer, cg1unpacker),
             '02': (cg2packer, cg2unpacker)}
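
# Hypothetical usage (repo assumed to be a localrepository; a sketch, not
# from the file): callers pick the packer/unpacker pair for a wire version
# out of packermap:
#
#   packer, unpacker = packermap['02']
#   bundler = packer(repo, bundlecaps=None)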

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
        if repo.ui.debugflag:
            repo.ui.debug("list of changesets:\n")
            for node in nodes:
                repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return packermap[version][1](util.chunkbuffer(gengroup), None)

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source, version=version)

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
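
# Hypothetical caller (repo, clientheads and commonnodes assumed; a sketch,
# not from the file): bundle whatever the other side is missing and stream
# it chunk by chunk:
#
#   cg = getchangegroup(repo, 'pull', heads=clientheads, common=commonnodes)
#   if cg is not None:
#       for chunk in cg.getchunks():
#           sendtoclient(chunk)   # sendtoclient is a placeholder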

def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise util.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft, expectedtotal=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries
    # source information. In this case we use the top level data. We
    # overwrite the arguments because we need to use the top level values
    # (if they exist) in this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)

    # write changelog data to temp files so concurrent readers will not see
    # an inconsistent view
    cl = repo.changelog
    cl.delayupdate(tr)
    oldheads = cl.heads()
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            def __init__(self, step, total):
                self._step = step
                self._total = total
                self._count = 1
            def __call__(self):
                repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                 total=self._total)
                self._count += 1
        source.callback = prog(_('changesets'), expectedtotal)

        efiles = set()
        def onchangelog(cl, node):
            efiles.update(cl.read(node)[3])

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp,
                                 addrevisioncb=onchangelog)
        efiles = len(efiles)

        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        repo.ui.progress(_('changesets'), None)
772
772
773 # pull off the manifest group
773 # pull off the manifest group
774 repo.ui.status(_("adding manifests\n"))
774 repo.ui.status(_("adding manifests\n"))
775 # manifests <= changesets
775 # manifests <= changesets
776 source.callback = prog(_('manifests'), changesets)
776 source.callback = prog(_('manifests'), changesets)
777 # no need to check for empty manifest group here:
777 # no need to check for empty manifest group here:
778 # if the result of the merge of 1 and 2 is the same in 3 and 4,
778 # if the result of the merge of 1 and 2 is the same in 3 and 4,
779 # no new manifest will be created and the manifest group will
779 # no new manifest will be created and the manifest group will
780 # be empty during the pull
780 # be empty during the pull
781 source.manifestheader()
781 source.manifestheader()
782 repo.manifest.addgroup(source, revmap, trp)
782 repo.manifest.addgroup(source, revmap, trp)
783 repo.ui.progress(_('manifests'), None)
783 repo.ui.progress(_('manifests'), None)
784
784
785 needfiles = {}
785 needfiles = {}
786 if repo.ui.configbool('server', 'validate', default=False):
786 if repo.ui.configbool('server', 'validate', default=False):
787 # validate incoming csets have their manifests
787 # validate incoming csets have their manifests
788 for cset in xrange(clstart, clend):
788 for cset in xrange(clstart, clend):
789 mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
789 mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
790 mfest = repo.manifest.readdelta(mfnode)
790 mfest = repo.manifest.readdelta(mfnode)
791 # store file nodes we must see
791 # store file nodes we must see
792 for f, n in mfest.iteritems():
792 for f, n in mfest.iteritems():
793 needfiles.setdefault(f, set()).add(n)
793 needfiles.setdefault(f, set()).add(n)
794
794
795 # process the files
795 # process the files
796 repo.ui.status(_("adding file changes\n"))
796 repo.ui.status(_("adding file changes\n"))
797 source.callback = None
797 source.callback = None
798 pr = prog(_('files'), efiles)
798 pr = prog(_('files'), efiles)
799 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
799 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
800 needfiles)
800 needfiles)
801 revisions += newrevs
801 revisions += newrevs
802 files += newfiles
802 files += newfiles
803
803
804 dh = 0
804 dh = 0
805 if oldheads:
805 if oldheads:
806 heads = cl.heads()
806 heads = cl.heads()
807 dh = len(heads) - len(oldheads)
807 dh = len(heads) - len(oldheads)
808 for h in heads:
808 for h in heads:
809 if h not in oldheads and repo[h].closesbranch():
809 if h not in oldheads and repo[h].closesbranch():
810 dh -= 1
810 dh -= 1
811 htext = ""
811 htext = ""
812 if dh:
812 if dh:
813 htext = _(" (%+d heads)") % dh
813 htext = _(" (%+d heads)") % dh
814
814
815 repo.ui.status(_("added %d changesets"
815 repo.ui.status(_("added %d changesets"
816 " with %d changes to %d files%s\n")
816 " with %d changes to %d files%s\n")
817 % (changesets, revisions, files, htext))
817 % (changesets, revisions, files, htext))
818 repo.invalidatevolatilesets()
818 repo.invalidatevolatilesets()
819
819
820 if changesets > 0:
820 if changesets > 0:
821 p = lambda: tr.writepending() and repo.root or ""
821 p = lambda: tr.writepending() and repo.root or ""
822 if 'node' not in tr.hookargs:
822 if 'node' not in tr.hookargs:
823 tr.hookargs['node'] = hex(cl.node(clstart))
823 tr.hookargs['node'] = hex(cl.node(clstart))
824 hookargs = dict(tr.hookargs)
824 hookargs = dict(tr.hookargs)
825 else:
825 else:
826 hookargs = dict(tr.hookargs)
826 hookargs = dict(tr.hookargs)
827 hookargs['node'] = hex(cl.node(clstart))
827 hookargs['node'] = hex(cl.node(clstart))
828 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
828 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
829
829
830 added = [cl.node(r) for r in xrange(clstart, clend)]
830 added = [cl.node(r) for r in xrange(clstart, clend)]
831 publishing = repo.publishing()
831 publishing = repo.publishing()
832 if srctype in ('push', 'serve'):
832 if srctype in ('push', 'serve'):
833 # Old servers can not push the boundary themselves.
833 # Old servers can not push the boundary themselves.
834 # New servers won't push the boundary if changeset already
834 # New servers won't push the boundary if changeset already
835 # exists locally as secret
835 # exists locally as secret
836 #
836 #
837 # We should not use added here but the list of all change in
837 # We should not use added here but the list of all change in
838 # the bundle
838 # the bundle
839 if publishing:
839 if publishing:
840 phases.advanceboundary(repo, tr, phases.public, srccontent)
840 phases.advanceboundary(repo, tr, phases.public, srccontent)
841 else:
841 else:
842 # Those changesets have been pushed from the outside, their
842 # Those changesets have been pushed from the outside, their
843 # phases are going to be pushed alongside. Therefor
843 # phases are going to be pushed alongside. Therefor
844 # `targetphase` is ignored.
844 # `targetphase` is ignored.
845 phases.advanceboundary(repo, tr, phases.draft, srccontent)
845 phases.advanceboundary(repo, tr, phases.draft, srccontent)
846 phases.retractboundary(repo, tr, phases.draft, added)
846 phases.retractboundary(repo, tr, phases.draft, added)
847 elif srctype != 'strip':
847 elif srctype != 'strip':
848 # publishing only alter behavior during push
848 # publishing only alter behavior during push
849 #
849 #
850 # strip should not touch boundary at all
850 # strip should not touch boundary at all
851 phases.retractboundary(repo, tr, targetphase, added)
851 phases.retractboundary(repo, tr, targetphase, added)
852
852
853 if changesets > 0:
853 if changesets > 0:
854 if srctype != 'strip':
854 if srctype != 'strip':
855 # During strip, branchcache is invalid but coming call to
855 # During strip, branchcache is invalid but coming call to
856 # `destroyed` will repair it.
856 # `destroyed` will repair it.
857 # In other case we can safely update cache on disk.
857 # In other case we can safely update cache on disk.
858 branchmap.updatecache(repo.filtered('served'))
858 branchmap.updatecache(repo.filtered('served'))
859
860 def runhooks():
861 # These hooks run when the lock releases, not when the
862 # transaction closes. So it's possible for the changelog
863 # to have changed since we last saw it.
864 if clstart >= len(repo):
865 return
866
867 # forcefully update the on-disk branch cache
868 repo.ui.debug("updating the branch cache\n")
869 repo.hook("changegroup", **hookargs)
870
871 for n in added:
872 args = hookargs.copy()
873 args['node'] = hex(n)
874 repo.hook("incoming", **args)
875
876 newheads = [h for h in repo.heads() if h not in oldheads]
877 repo.ui.log("incoming",
878 "%s incoming changes - new heads: %s\n",
879 len(added),
880 ', '.join([hex(c[:6]) for c in newheads]))
881
882 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
883 lambda tr: repo._afterlock(runhooks))
884
885 tr.close()
886
887 finally:
888 tr.release()
889 repo.ui.flush()
890 # never return 0 here:
891 if dh < 0:
892 return dh - 1
893 else:
894 return dh + 1
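
The `dh - 1` / `dh + 1` shift at the end keeps 0 reserved for "nothing was added": a positive return means 1 + the number of new heads, and a negative return means -1 - the number of removed heads. A minimal sketch of decoding that value, assuming this convention; the helper name `describeheads` is invented for illustration and is not part of Mercurial's API:

def describeheads(ret):
    # hypothetical helper, for illustration only
    if ret == 0:
        return 'nothing added'
    if ret > 0:
        # 1 means the head count is unchanged; 1 + n means n new heads
        return '%d new heads' % (ret - 1)
    # ret <= -2 here: -1 - n means n heads went away
    return '%d heads removed' % (-ret - 1)

So a return of 1 means changesets were added without changing the number of heads, 3 means two new heads appeared, and -2 means one head was merged away.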
@@ -1,169 +1,199 b''
1 #require killdaemons
2
3 This test checks behavior related to bundle1 that changed or is likely
4 to change with bundle2. Feel free to factor out any part of the test
5 which does not need to exist to keep bundle1 working.
6
7 $ cat << EOF >> $HGRCPATH
8 > [experimental]
9 > # This test is dedicated to interaction through old bundle
10 > bundle2-exp = False
11 > EOF
12
13 $ hg init test
14 $ cd test
15 $ echo a > a
16 $ hg ci -Ama
17 adding a
18 $ cd ..
19 $ hg clone test test2
20 updating to branch default
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 $ cd test2
23 $ echo a >> a
24 $ hg ci -mb
25 $ req() {
26 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
27 > cat hg.pid >> $DAEMON_PIDS
28 > hg --cwd ../test2 push http://localhost:$HGPORT/
29 > exitstatus=$?
30 > killdaemons.py
31 > echo % serve errors
32 > cat errors.log
33 > return $exitstatus
34 > }
35 $ cd ../test
36
37 expect ssl error
38
39 $ req
40 pushing to http://localhost:$HGPORT/
41 searching for changes
42 abort: HTTP Error 403: ssl required
43 % serve errors
44 [255]
45
46 expect authorization error
47
48 $ echo '[web]' > .hg/hgrc
49 $ echo 'push_ssl = false' >> .hg/hgrc
50 $ req
51 pushing to http://localhost:$HGPORT/
52 searching for changes
53 abort: authorization failed
54 % serve errors
55 [255]
56
57 expect authorization error: must have authorized user
58
59 $ echo 'allow_push = unperson' >> .hg/hgrc
60 $ req
61 pushing to http://localhost:$HGPORT/
62 searching for changes
63 abort: authorization failed
64 % serve errors
65 [255]
66
67 expect success
68
69 $ echo 'allow_push = *' >> .hg/hgrc
70 $ echo '[hooks]' >> .hg/hgrc
71 $ echo "changegroup = printenv.py changegroup 0" >> .hg/hgrc
72 $ echo "pushkey = printenv.py pushkey 0" >> .hg/hgrc
73 $ req
74 pushing to http://localhost:$HGPORT/
75 searching for changes
76 remote: adding changesets
77 remote: adding manifests
78 remote: adding file changes
79 remote: added 1 changesets with 1 changes to 1 files
80 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
81 % serve errors
82 $ hg rollback
83 repository tip rolled back to revision 0 (undo serve)
84
85 expect success, server lacks the httpheader capability
86
87 $ CAP=httpheader
88 $ . "$TESTDIR/notcapable"
89 $ req
90 pushing to http://localhost:$HGPORT/
91 searching for changes
92 remote: adding changesets
93 remote: adding manifests
94 remote: adding file changes
95 remote: added 1 changesets with 1 changes to 1 files
96 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
97 % serve errors
98 $ hg rollback
99 repository tip rolled back to revision 0 (undo serve)
100
101 expect success, server lacks the unbundlehash capability
102
103 $ CAP=unbundlehash
104 $ . "$TESTDIR/notcapable"
105 $ req
106 pushing to http://localhost:$HGPORT/
107 searching for changes
108 remote: adding changesets
109 remote: adding manifests
110 remote: adding file changes
111 remote: added 1 changesets with 1 changes to 1 files
112 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
113 % serve errors
114 $ hg rollback
115 repository tip rolled back to revision 0 (undo serve)
116
117 expect success, pre-d1b16a746db6 server supports the unbundle capability, but
118 has no parameter
119
120 $ cat <<EOF > notcapable-unbundleparam.py
121 > from mercurial import extensions, httppeer
122 > def capable(orig, self, name):
123 > if name == 'unbundle':
124 > return True
125 > return orig(self, name)
126 > def uisetup(ui):
127 > extensions.wrapfunction(httppeer.httppeer, 'capable', capable)
128 > EOF
129 $ cp $HGRCPATH $HGRCPATH.orig
130 $ cat <<EOF >> $HGRCPATH
131 > [extensions]
132 > notcapable-unbundleparam = `pwd`/notcapable-unbundleparam.py
133 > EOF
134 $ req
135 pushing to http://localhost:$HGPORT/
136 searching for changes
137 remote: adding changesets
138 remote: adding manifests
139 remote: adding file changes
140 remote: added 1 changesets with 1 changes to 1 files
141 remote: changegroup hook: * (glob)
142 % serve errors
143 $ hg rollback
144 repository tip rolled back to revision 0 (undo serve)
145 $ mv $HGRCPATH.orig $HGRCPATH
146
147 expect push success, phase change failure
148
149 $ cat > .hg/hgrc <<EOF
150 > [web]
151 > push_ssl = false
152 > allow_push = *
153 > [hooks]
154 > prepushkey = printenv.py prepushkey 1
155 > EOF
156 $ req
157 pushing to http://localhost:$HGPORT/
158 searching for changes
159 remote: adding changesets
160 remote: adding manifests
161 remote: adding file changes
162 remote: added 1 changesets with 1 changes to 1 files
163 % serve errors
164
165 expect phase change success
166
167 $ echo "prepushkey = printenv.py prepushkey 0" >> .hg/hgrc
168 $ req
169 pushing to http://localhost:$HGPORT/
170 searching for changes
171 no changes found
172 % serve errors
173 [1]
174 $ hg rollback
175 repository tip rolled back to revision 0 (undo serve)
176
177 expect authorization error: all users denied
178
179 $ echo '[web]' > .hg/hgrc
180 $ echo 'push_ssl = false' >> .hg/hgrc
181 $ echo 'deny_push = *' >> .hg/hgrc
182 $ req
183 pushing to http://localhost:$HGPORT/
184 searching for changes
185 abort: authorization failed
186 % serve errors
187 [255]
188
189 expect authorization error: some users denied, users must be authenticated
190
191 $ echo 'deny_push = unperson' >> .hg/hgrc
192 $ req
193 pushing to http://localhost:$HGPORT/
194 searching for changes
195 abort: authorization failed
196 % serve errors
197 [255]
198
199 $ cd ..
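
The hooks configured throughout this test point at printenv.py, a helper shipped in Mercurial's tests/ directory. The real script does a bit more; the sketch below is a simplified stand-in showing the mechanism — Mercurial exports hook context as HG_* environment variables, and the trailing numeric argument becomes the hook's exit status, which is how `prepushkey ... 1` above makes the phase push fail:

import os
import sys

# simplified stand-in for tests/printenv.py, for illustration only
name = sys.argv[1]  # hook name, e.g. "changegroup" or "prepushkey"
exitcode = int(sys.argv[2]) if len(sys.argv) > 2 else 0
# collect the HG_* variables Mercurial sets for the hook, sorted for stable output
env = ['%s=%s' % (k, os.environ[k])
       for k in sorted(os.environ) if k.startswith('HG_')]
sys.stdout.write('%s hook: %s\n' % (name, ' '.join(env)))
sys.exit(exitcode)  # a non-zero status makes pre* hooks veto the operation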