##// END OF EJS Templates
changegroup: use the 'postclose' API on transaction...
Pierre-Yves David -
r23221:cadc9a72 default
parent child Browse files
Show More
@@ -1,830 +1,831 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from i18n import _
9 from i18n import _
10 from node import nullrev, nullid, hex, short
10 from node import nullrev, nullid, hex, short
11 import mdiff, util, dagutil
11 import mdiff, util, dagutil
12 import struct, os, bz2, zlib, tempfile
12 import struct, os, bz2, zlib, tempfile
13 import discovery, error, phases, branchmap
13 import discovery, error, phases, branchmap
14
14
# struct formats for the per-revision delta headers: v1 carries four
# 20-byte nodes (node, p1, p2, linknode); v2 adds a fifth 20-byte field,
# the explicit delta base node.
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
17
17
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the peer hung up or the stream is truncated
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
26
26
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # each chunk starts with a 4-byte big-endian length that counts the
    # length field itself
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) can never describe a valid chunk
        raise util.Abort(_("invalid chunk length %d") % length)
    # a zero length marks the end of a chunk group
    return ""
36
36
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length field counts itself, hence the extra 4 bytes
    return struct.pack(">l", 4 + length)
40
40
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # equivalent to struct.pack(">l", 0): four zero bytes
    return b"\x00\x00\x00\x00"
44
44
class nocompress(object):
    """Pass-through 'compressor' used for uncompressed bundle types."""
    def compress(self, x):
        # identity transform: data goes out exactly as it came in
        return x
    def flush(self):
        # nothing is ever buffered, so there is nothing to flush
        return ""
50
50
# bundle type name -> (on-disk/on-wire header string, compressor factory)
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
62
62
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.

    cg is a changegroup-like object providing getchunks(); bundletype is a
    key of the bundletypes map above; vfs, when given, is used for opening
    and unlinking the target path.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # from this point on, the file exists and must be removed on error
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        # fully written: disarm the error-path cleanup below
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
108
108
def decompressor(fh, alg):
    """Return a stream of decompressed data read from fh.

    alg is the two-letter compression tag from the bundle header:
    'UN' (none), 'GZ' (zlib) or 'BZ' (bz2).
    """
    if alg == 'UN':
        # nothing to undo; hand back the raw stream unchanged
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the 'BZ' magic was consumed by the header parser; replay it
            # so the decompressor sees a well-formed stream
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
126
126
class cg1unpacker(object):
    """Unpacker for version '01' changegroup streams (see packermap)."""
    # struct format and byte size of each per-revision delta header
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        # wrap the input in the decompressor matching alg ('UN'/'GZ'/'BZ')
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional no-argument progress hook, fired once per chunk read
        self.callback = None
    def compressed(self):
        # anything but 'UN' means the underlying stream was compressed
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read a chunk's 4-byte length prefix and return its payload size.

        Returns 0 for the empty (group-terminating) chunk; aborts on a
        malformed length.  The stored length counts the 4 prefix bytes,
        hence the final 'l - 4'.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            # empty chunk: no more filelogs
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # v1 has no explicit delta base: it is implied to be the previous
        # node in the stream, or p1 for the first revision of a group
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one revision delta; return {} at the end of the group."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                # re-emit the chunk with its length prefix, sliced into
                # 1MB pieces so we never hold a huge string at once
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
218
218
class cg2unpacker(cg1unpacker):
    """Unpacker for version '02' changegroup streams.

    Identical to v1 except for the delta header, which carries an
    explicit delta base node instead of implying one.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)

    def _deltaheader(self, headertuple, prevnode):
        # header already is (node, p1, p2, deltabase, cs); prevnode is
        # irrelevant because the base comes over the wire explicitly
        return headertuple
226
226
class headerlessfixup(object):
    """Replay already-consumed header bytes h in front of stream fh."""
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            # header fully replayed: read straight from the stream
            return readexactly(self._fh, n)
        out, self._h = buffered[:n], buffered[n:]
        if len(out) < n:
            # header exhausted mid-request; top up from the stream
            out += readexactly(self._fh, n - len(out))
        return out
238
238
class cg1packer(object):
    """Packer producing version '01' changegroup streams (see packermap)."""
    # struct format of the per-revision delta header
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # 'bundle.reorder' config: 'auto' (None) lets group() decide per
        # revlog; any other value forces reordering on or off
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        """Return the empty chunk terminating a chunk group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog 'fname'."""
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        """Return the subset of 'missing' whose linkrev is not common."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        # first group: the changelog itself
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        # second group: manifests referenced by the bundled changesets
        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            # in the fastpath case, linkrevs are trusted directly; the
            # slowpath collected them via lookupmf above
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        # remaining groups: one per changed file
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the filelog chunk groups for 'changedfiles'."""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # v1 always deltas against the previous rev in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, meta, delta) encoding one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if base == nullrev:
            # no base: send the full text as a trivial diff
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
447
447
class cg2packer(cg1packer):
    """Packer producing version '02' changegroups (explicit delta bases)."""

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        # generaldelta revlogs are already stored in a delta-friendly
        # order, so skip relinearization unless reordering was forced on
        if revlog._generaldelta and reorder is not True:
            reorder = False
        return cg1packer.group(self, nodelist, revlog, lookup,
                               units=units, reorder=reorder)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # reuse the stored delta only when its base is something the
        # receiver is guaranteed to have (a parent or the previous rev);
        # otherwise fall back to prev to avoid sending full revisions
        if dp != nullrev and dp in (p1, p2, prev):
            return dp
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # unlike v1, the delta base travels explicitly in the header
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
468
468
# changegroup wire-format version -> (packer class, unpacker class)
packermap = {'01': (cg1packer, cg1unpacker),
             '02': (cg2packer, cg2unpacker)}
471
471
472 def _changegroupinfo(repo, nodes, source):
472 def _changegroupinfo(repo, nodes, source):
473 if repo.ui.verbose or source == 'bundle':
473 if repo.ui.verbose or source == 'bundle':
474 repo.ui.status(_("%d changesets found\n") % len(nodes))
474 repo.ui.status(_("%d changesets found\n") % len(nodes))
475 if repo.ui.debugflag:
475 if repo.ui.debugflag:
476 repo.ui.debug("list of changesets:\n")
476 repo.ui.debug("list of changesets:\n")
477 for node in nodes:
477 for node in nodes:
478 repo.ui.debug("%s\n" % hex(node))
478 repo.ui.debug("%s\n" % hex(node))
479
479
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Return the raw chunk generator for the given outgoing set.

    The chunks come straight from bundler.generate(); use getsubset()
    to get them wrapped in an unpacker.
    """
    repo = repo.unfiltered()
    common = outgoing.common
    missing = outgoing.missing
    heads = outgoing.missingheads
    heads.sort()
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    if fastpath:
        fastpathlinkrev = True
    else:
        fastpathlinkrev = (repo.filtername is None
                           and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(common, missing, fastpathlinkrev, source)
495
495
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw, but wrap the chunk stream in a cg1unpacker."""
    stream = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return cg1unpacker(util.chunkbuffer(stream), 'UN')
499
499
500 def changegroupsubset(repo, roots, heads, source):
500 def changegroupsubset(repo, roots, heads, source):
501 """Compute a changegroup consisting of all the nodes that are
501 """Compute a changegroup consisting of all the nodes that are
502 descendants of any of the roots and ancestors of any of the heads.
502 descendants of any of the roots and ancestors of any of the heads.
503 Return a chunkbuffer object whose read() method will return
503 Return a chunkbuffer object whose read() method will return
504 successive changegroup chunks.
504 successive changegroup chunks.
505
505
506 It is fairly complex as determining which filenodes and which
506 It is fairly complex as determining which filenodes and which
507 manifest nodes need to be included for the changeset to be complete
507 manifest nodes need to be included for the changeset to be complete
508 is non-trivial.
508 is non-trivial.
509
509
510 Another wrinkle is doing the reverse, figuring out which changeset in
510 Another wrinkle is doing the reverse, figuring out which changeset in
511 the changegroup a particular filenode or manifestnode belongs to.
511 the changegroup a particular filenode or manifestnode belongs to.
512 """
512 """
513 cl = repo.changelog
513 cl = repo.changelog
514 if not roots:
514 if not roots:
515 roots = [nullid]
515 roots = [nullid]
516 # TODO: remove call to nodesbetween.
516 # TODO: remove call to nodesbetween.
517 csets, roots, heads = cl.nodesbetween(roots, heads)
517 csets, roots, heads = cl.nodesbetween(roots, heads)
518 discbases = []
518 discbases = []
519 for n in roots:
519 for n in roots:
520 discbases.extend([p for p in cl.parents(n) if p != nullid])
520 discbases.extend([p for p in cl.parents(n) if p != nullid])
521 outgoing = discovery.outgoing(cl, discbases, heads)
521 outgoing = discovery.outgoing(cl, discbases, heads)
522 bundler = cg1packer(repo)
522 bundler = cg1packer(repo)
523 return getsubset(repo, outgoing, bundler, source)
523 return getsubset(repo, outgoing, bundler, source)
524
524
525 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
525 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
526 version='01'):
526 version='01'):
527 """Like getbundle, but taking a discovery.outgoing as an argument.
527 """Like getbundle, but taking a discovery.outgoing as an argument.
528
528
529 This is only implemented for local repos and reuses potentially
529 This is only implemented for local repos and reuses potentially
530 precomputed sets in outgoing. Returns a raw changegroup generator."""
530 precomputed sets in outgoing. Returns a raw changegroup generator."""
531 if not outgoing.missing:
531 if not outgoing.missing:
532 return None
532 return None
533 bundler = packermap[version][0](repo, bundlecaps)
533 bundler = packermap[version][0](repo, bundlecaps)
534 return getsubsetraw(repo, outgoing, bundler, source)
534 return getsubsetraw(repo, outgoing, bundler, source)
535
535
536 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
536 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
537 """Like getbundle, but taking a discovery.outgoing as an argument.
537 """Like getbundle, but taking a discovery.outgoing as an argument.
538
538
539 This is only implemented for local repos and reuses potentially
539 This is only implemented for local repos and reuses potentially
540 precomputed sets in outgoing."""
540 precomputed sets in outgoing."""
541 if not outgoing.missing:
541 if not outgoing.missing:
542 return None
542 return None
543 bundler = cg1packer(repo, bundlecaps)
543 bundler = cg1packer(repo, bundlecaps)
544 return getsubset(repo, outgoing, bundler, source)
544 return getsubset(repo, outgoing, bundler, source)
545
545
546 def _computeoutgoing(repo, heads, common):
546 def _computeoutgoing(repo, heads, common):
547 """Computes which revs are outgoing given a set of common
547 """Computes which revs are outgoing given a set of common
548 and a set of heads.
548 and a set of heads.
549
549
550 This is a separate function so extensions can have access to
550 This is a separate function so extensions can have access to
551 the logic.
551 the logic.
552
552
553 Returns a discovery.outgoing object.
553 Returns a discovery.outgoing object.
554 """
554 """
555 cl = repo.changelog
555 cl = repo.changelog
556 if common:
556 if common:
557 hasnode = cl.hasnode
557 hasnode = cl.hasnode
558 common = [n for n in common if hasnode(n)]
558 common = [n for n in common if hasnode(n)]
559 else:
559 else:
560 common = [nullid]
560 common = [nullid]
561 if not heads:
561 if not heads:
562 heads = cl.heads()
562 heads = cl.heads()
563 return discovery.outgoing(cl, common, heads)
563 return discovery.outgoing(cl, common, heads)
564
564
565 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
565 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
566 version='01'):
566 version='01'):
567 """Like changegroupsubset, but returns the set difference between the
567 """Like changegroupsubset, but returns the set difference between the
568 ancestors of heads and the ancestors common.
568 ancestors of heads and the ancestors common.
569
569
570 If heads is None, use the local heads. If common is None, use [nullid].
570 If heads is None, use the local heads. If common is None, use [nullid].
571
571
572 If version is None, use a version '1' changegroup.
572 If version is None, use a version '1' changegroup.
573
573
574 The nodes in common might not all be known locally due to the way the
574 The nodes in common might not all be known locally due to the way the
575 current discovery protocol works. Returns a raw changegroup generator.
575 current discovery protocol works. Returns a raw changegroup generator.
576 """
576 """
577 outgoing = _computeoutgoing(repo, heads, common)
577 outgoing = _computeoutgoing(repo, heads, common)
578 return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
578 return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
579 version=version)
579 version=version)
580
580
581 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
581 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
582 """Like changegroupsubset, but returns the set difference between the
582 """Like changegroupsubset, but returns the set difference between the
583 ancestors of heads and the ancestors common.
583 ancestors of heads and the ancestors common.
584
584
585 If heads is None, use the local heads. If common is None, use [nullid].
585 If heads is None, use the local heads. If common is None, use [nullid].
586
586
587 The nodes in common might not all be known locally due to the way the
587 The nodes in common might not all be known locally due to the way the
588 current discovery protocol works.
588 current discovery protocol works.
589 """
589 """
590 outgoing = _computeoutgoing(repo, heads, common)
590 outgoing = _computeoutgoing(repo, heads, common)
591 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
591 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
592
592
593 def changegroup(repo, basenodes, source):
593 def changegroup(repo, basenodes, source):
594 # to avoid a race we use changegroupsubset() (issue1320)
594 # to avoid a race we use changegroupsubset() (issue1320)
595 return changegroupsubset(repo, basenodes, repo.heads(), source)
595 return changegroupsubset(repo, basenodes, repo.heads(), source)
596
596
597 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
597 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
598 revisions = 0
598 revisions = 0
599 files = 0
599 files = 0
600 while True:
600 while True:
601 chunkdata = source.filelogheader()
601 chunkdata = source.filelogheader()
602 if not chunkdata:
602 if not chunkdata:
603 break
603 break
604 f = chunkdata["filename"]
604 f = chunkdata["filename"]
605 repo.ui.debug("adding %s revisions\n" % f)
605 repo.ui.debug("adding %s revisions\n" % f)
606 pr()
606 pr()
607 fl = repo.file(f)
607 fl = repo.file(f)
608 o = len(fl)
608 o = len(fl)
609 if not fl.addgroup(source, revmap, trp):
609 if not fl.addgroup(source, revmap, trp):
610 raise util.Abort(_("received file revlog group is empty"))
610 raise util.Abort(_("received file revlog group is empty"))
611 revisions += len(fl) - o
611 revisions += len(fl) - o
612 files += 1
612 files += 1
613 if f in needfiles:
613 if f in needfiles:
614 needs = needfiles[f]
614 needs = needfiles[f]
615 for new in xrange(o, len(fl)):
615 for new in xrange(o, len(fl)):
616 n = fl.node(new)
616 n = fl.node(new)
617 if n in needs:
617 if n in needs:
618 needs.remove(n)
618 needs.remove(n)
619 else:
619 else:
620 raise util.Abort(
620 raise util.Abort(
621 _("received spurious file revlog entry"))
621 _("received spurious file revlog entry"))
622 if not needs:
622 if not needs:
623 del needfiles[f]
623 del needfiles[f]
624 repo.ui.progress(_('files'), None)
624 repo.ui.progress(_('files'), None)
625
625
626 for f, needs in needfiles.iteritems():
626 for f, needs in needfiles.iteritems():
627 fl = repo.file(f)
627 fl = repo.file(f)
628 for n in needs:
628 for n in needs:
629 try:
629 try:
630 fl.rev(n)
630 fl.rev(n)
631 except error.LookupError:
631 except error.LookupError:
632 raise util.Abort(
632 raise util.Abort(
633 _('missing file data for %s:%s - run hg verify') %
633 _('missing file data for %s:%s - run hg verify') %
634 (f, hex(n)))
634 (f, hex(n)))
635
635
636 return revisions, files
636 return revisions, files
637
637
638 def addchangegroup(repo, source, srctype, url, emptyok=False,
638 def addchangegroup(repo, source, srctype, url, emptyok=False,
639 targetphase=phases.draft):
639 targetphase=phases.draft):
640 """Add the changegroup returned by source.read() to this repo.
640 """Add the changegroup returned by source.read() to this repo.
641 srctype is a string like 'push', 'pull', or 'unbundle'. url is
641 srctype is a string like 'push', 'pull', or 'unbundle'. url is
642 the URL of the repo where this changegroup is coming from.
642 the URL of the repo where this changegroup is coming from.
643
643
644 Return an integer summarizing the change to this repo:
644 Return an integer summarizing the change to this repo:
645 - nothing changed or no source: 0
645 - nothing changed or no source: 0
646 - more heads than before: 1+added heads (2..n)
646 - more heads than before: 1+added heads (2..n)
647 - fewer heads than before: -1-removed heads (-2..-n)
647 - fewer heads than before: -1-removed heads (-2..-n)
648 - number of heads stays the same: 1
648 - number of heads stays the same: 1
649 """
649 """
650 repo = repo.unfiltered()
650 repo = repo.unfiltered()
651 def csmap(x):
651 def csmap(x):
652 repo.ui.debug("add changeset %s\n" % short(x))
652 repo.ui.debug("add changeset %s\n" % short(x))
653 return len(cl)
653 return len(cl)
654
654
655 def revmap(x):
655 def revmap(x):
656 return cl.rev(x)
656 return cl.rev(x)
657
657
658 if not source:
658 if not source:
659 return 0
659 return 0
660
660
661 changesets = files = revisions = 0
661 changesets = files = revisions = 0
662 efiles = set()
662 efiles = set()
663
663
664 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
664 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
665 # The transaction could have been created before and already carries source
665 # The transaction could have been created before and already carries source
666 # information. In this case we use the top level data. We overwrite the
666 # information. In this case we use the top level data. We overwrite the
667 # argument because we need to use the top level value (if they exist) in
667 # argument because we need to use the top level value (if they exist) in
668 # this function.
668 # this function.
669 srctype = tr.hookargs.setdefault('source', srctype)
669 srctype = tr.hookargs.setdefault('source', srctype)
670 url = tr.hookargs.setdefault('url', url)
670 url = tr.hookargs.setdefault('url', url)
671
671
672 # write changelog data to temp files so concurrent readers will not see
672 # write changelog data to temp files so concurrent readers will not see
673 # inconsistent view
673 # inconsistent view
674 cl = repo.changelog
674 cl = repo.changelog
675 cl.delayupdate(tr)
675 cl.delayupdate(tr)
676 oldheads = cl.heads()
676 oldheads = cl.heads()
677 try:
677 try:
678 repo.hook('prechangegroup', throw=True, **tr.hookargs)
678 repo.hook('prechangegroup', throw=True, **tr.hookargs)
679
679
680 trp = weakref.proxy(tr)
680 trp = weakref.proxy(tr)
681 # pull off the changeset group
681 # pull off the changeset group
682 repo.ui.status(_("adding changesets\n"))
682 repo.ui.status(_("adding changesets\n"))
683 clstart = len(cl)
683 clstart = len(cl)
684 class prog(object):
684 class prog(object):
685 step = _('changesets')
685 step = _('changesets')
686 count = 1
686 count = 1
687 ui = repo.ui
687 ui = repo.ui
688 total = None
688 total = None
689 def __call__(repo):
689 def __call__(repo):
690 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
690 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
691 total=repo.total)
691 total=repo.total)
692 repo.count += 1
692 repo.count += 1
693 pr = prog()
693 pr = prog()
694 source.callback = pr
694 source.callback = pr
695
695
696 source.changelogheader()
696 source.changelogheader()
697 srccontent = cl.addgroup(source, csmap, trp)
697 srccontent = cl.addgroup(source, csmap, trp)
698 if not (srccontent or emptyok):
698 if not (srccontent or emptyok):
699 raise util.Abort(_("received changelog group is empty"))
699 raise util.Abort(_("received changelog group is empty"))
700 clend = len(cl)
700 clend = len(cl)
701 changesets = clend - clstart
701 changesets = clend - clstart
702 for c in xrange(clstart, clend):
702 for c in xrange(clstart, clend):
703 efiles.update(repo[c].files())
703 efiles.update(repo[c].files())
704 efiles = len(efiles)
704 efiles = len(efiles)
705 repo.ui.progress(_('changesets'), None)
705 repo.ui.progress(_('changesets'), None)
706
706
707 # pull off the manifest group
707 # pull off the manifest group
708 repo.ui.status(_("adding manifests\n"))
708 repo.ui.status(_("adding manifests\n"))
709 pr.step = _('manifests')
709 pr.step = _('manifests')
710 pr.count = 1
710 pr.count = 1
711 pr.total = changesets # manifests <= changesets
711 pr.total = changesets # manifests <= changesets
712 # no need to check for empty manifest group here:
712 # no need to check for empty manifest group here:
713 # if the result of the merge of 1 and 2 is the same in 3 and 4,
713 # if the result of the merge of 1 and 2 is the same in 3 and 4,
714 # no new manifest will be created and the manifest group will
714 # no new manifest will be created and the manifest group will
715 # be empty during the pull
715 # be empty during the pull
716 source.manifestheader()
716 source.manifestheader()
717 repo.manifest.addgroup(source, revmap, trp)
717 repo.manifest.addgroup(source, revmap, trp)
718 repo.ui.progress(_('manifests'), None)
718 repo.ui.progress(_('manifests'), None)
719
719
720 needfiles = {}
720 needfiles = {}
721 if repo.ui.configbool('server', 'validate', default=False):
721 if repo.ui.configbool('server', 'validate', default=False):
722 # validate incoming csets have their manifests
722 # validate incoming csets have their manifests
723 for cset in xrange(clstart, clend):
723 for cset in xrange(clstart, clend):
724 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
724 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
725 mfest = repo.manifest.readdelta(mfest)
725 mfest = repo.manifest.readdelta(mfest)
726 # store file nodes we must see
726 # store file nodes we must see
727 for f, n in mfest.iteritems():
727 for f, n in mfest.iteritems():
728 needfiles.setdefault(f, set()).add(n)
728 needfiles.setdefault(f, set()).add(n)
729
729
730 # process the files
730 # process the files
731 repo.ui.status(_("adding file changes\n"))
731 repo.ui.status(_("adding file changes\n"))
732 pr.step = _('files')
732 pr.step = _('files')
733 pr.count = 1
733 pr.count = 1
734 pr.total = efiles
734 pr.total = efiles
735 source.callback = None
735 source.callback = None
736
736
737 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
737 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
738 needfiles)
738 needfiles)
739 revisions += newrevs
739 revisions += newrevs
740 files += newfiles
740 files += newfiles
741
741
742 dh = 0
742 dh = 0
743 if oldheads:
743 if oldheads:
744 heads = cl.heads()
744 heads = cl.heads()
745 dh = len(heads) - len(oldheads)
745 dh = len(heads) - len(oldheads)
746 for h in heads:
746 for h in heads:
747 if h not in oldheads and repo[h].closesbranch():
747 if h not in oldheads and repo[h].closesbranch():
748 dh -= 1
748 dh -= 1
749 htext = ""
749 htext = ""
750 if dh:
750 if dh:
751 htext = _(" (%+d heads)") % dh
751 htext = _(" (%+d heads)") % dh
752
752
753 repo.ui.status(_("added %d changesets"
753 repo.ui.status(_("added %d changesets"
754 " with %d changes to %d files%s\n")
754 " with %d changes to %d files%s\n")
755 % (changesets, revisions, files, htext))
755 % (changesets, revisions, files, htext))
756 repo.invalidatevolatilesets()
756 repo.invalidatevolatilesets()
757
757
758 if changesets > 0:
758 if changesets > 0:
759 p = lambda: tr.writepending() and repo.root or ""
759 p = lambda: tr.writepending() and repo.root or ""
760 if 'node' not in tr.hookargs:
760 if 'node' not in tr.hookargs:
761 tr.hookargs['node'] = hex(cl.node(clstart))
761 tr.hookargs['node'] = hex(cl.node(clstart))
762 hookargs = dict(tr.hookargs)
762 hookargs = dict(tr.hookargs)
763 else:
763 else:
764 hookargs = dict(tr.hookargs)
764 hookargs = dict(tr.hookargs)
765 hookargs['node'] = hex(cl.node(clstart))
765 hookargs['node'] = hex(cl.node(clstart))
766 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
766 repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
767
767
768 added = [cl.node(r) for r in xrange(clstart, clend)]
768 added = [cl.node(r) for r in xrange(clstart, clend)]
769 publishing = repo.ui.configbool('phases', 'publish', True)
769 publishing = repo.ui.configbool('phases', 'publish', True)
770 if srctype in ('push', 'serve'):
770 if srctype in ('push', 'serve'):
771 # Old servers can not push the boundary themselves.
771 # Old servers can not push the boundary themselves.
772 # New servers won't push the boundary if changeset already
772 # New servers won't push the boundary if changeset already
773 # exists locally as secret
773 # exists locally as secret
774 #
774 #
775 # We should not use added here but the list of all change in
775 # We should not use added here but the list of all change in
776 # the bundle
776 # the bundle
777 if publishing:
777 if publishing:
778 phases.advanceboundary(repo, tr, phases.public, srccontent)
778 phases.advanceboundary(repo, tr, phases.public, srccontent)
779 else:
779 else:
780 # Those changesets have been pushed from the outside, their
780 # Those changesets have been pushed from the outside, their
781 # phases are going to be pushed alongside. Therefor
781 # phases are going to be pushed alongside. Therefor
782 # `targetphase` is ignored.
782 # `targetphase` is ignored.
783 phases.advanceboundary(repo, tr, phases.draft, srccontent)
783 phases.advanceboundary(repo, tr, phases.draft, srccontent)
784 phases.retractboundary(repo, tr, phases.draft, added)
784 phases.retractboundary(repo, tr, phases.draft, added)
785 elif srctype != 'strip':
785 elif srctype != 'strip':
786 # publishing only alter behavior during push
786 # publishing only alter behavior during push
787 #
787 #
788 # strip should not touch boundary at all
788 # strip should not touch boundary at all
789 phases.retractboundary(repo, tr, targetphase, added)
789 phases.retractboundary(repo, tr, targetphase, added)
790
790
791
792 tr.close()
793
794 if changesets > 0:
791 if changesets > 0:
795 if srctype != 'strip':
792 if srctype != 'strip':
796 # During strip, branchcache is invalid but coming call to
793 # During strip, branchcache is invalid but coming call to
797 # `destroyed` will repair it.
794 # `destroyed` will repair it.
798 # In other case we can safely update cache on disk.
795 # In other case we can safely update cache on disk.
799 branchmap.updatecache(repo.filtered('served'))
796 branchmap.updatecache(repo.filtered('served'))
800
797
801 def runhooks():
798 def runhooks():
802 # These hooks run when the lock releases, not when the
799 # These hooks run when the lock releases, not when the
803 # transaction closes. So it's possible for the changelog
800 # transaction closes. So it's possible for the changelog
804 # to have changed since we last saw it.
801 # to have changed since we last saw it.
805 if clstart >= len(repo):
802 if clstart >= len(repo):
806 return
803 return
807
804
808 # forcefully update the on-disk branch cache
805 # forcefully update the on-disk branch cache
809 repo.ui.debug("updating the branch cache\n")
806 repo.ui.debug("updating the branch cache\n")
810 repo.hook("changegroup", **hookargs)
807 repo.hook("changegroup", **hookargs)
811
808
812 for n in added:
809 for n in added:
813 args = hookargs.copy()
810 args = hookargs.copy()
814 args['node'] = hex(n)
811 args['node'] = hex(n)
815 repo.hook("incoming", **args)
812 repo.hook("incoming", **args)
816
813
817 newheads = [h for h in repo.heads() if h not in oldheads]
814 newheads = [h for h in repo.heads() if h not in oldheads]
818 repo.ui.log("incoming",
815 repo.ui.log("incoming",
819 "%s incoming changes - new heads: %s\n",
816 "%s incoming changes - new heads: %s\n",
820 len(added),
817 len(added),
821 ', '.join([hex(c[:6]) for c in newheads]))
818 ', '.join([hex(c[:6]) for c in newheads]))
822 repo._afterlock(runhooks)
819
820 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
821 lambda: repo._afterlock(runhooks))
822
823 tr.close()
823
824
824 finally:
825 finally:
825 tr.release()
826 tr.release()
826 # never return 0 here:
827 # never return 0 here:
827 if dh < 0:
828 if dh < 0:
828 return dh - 1
829 return dh - 1
829 else:
830 else:
830 return dh + 1
831 return dh + 1
@@ -1,484 +1,482 b''
1 Test exchange of common information using bundle2
1 Test exchange of common information using bundle2
2
2
3
3
4 $ getmainid() {
4 $ getmainid() {
5 > hg -R main log --template '{node}\n' --rev "$1"
5 > hg -R main log --template '{node}\n' --rev "$1"
6 > }
6 > }
7
7
8 enable obsolescence
8 enable obsolescence
9
9
10 $ cat >> $HGRCPATH << EOF
10 $ cat >> $HGRCPATH << EOF
11 > [experimental]
11 > [experimental]
12 > evolution=createmarkers,exchange
12 > evolution=createmarkers,exchange
13 > bundle2-exp=True
13 > bundle2-exp=True
14 > [ui]
14 > [ui]
15 > ssh=python "$TESTDIR/dummyssh"
15 > ssh=python "$TESTDIR/dummyssh"
16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 > [web]
17 > [web]
18 > push_ssl = false
18 > push_ssl = false
19 > allow_push = *
19 > allow_push = *
20 > [phases]
20 > [phases]
21 > publish=False
21 > publish=False
22 > [hooks]
22 > [hooks]
23 > changegroup = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" changegroup"
23 > changegroup = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" changegroup"
24 > b2x-transactionclose = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
24 > b2x-transactionclose = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
25 > EOF
25 > EOF
26
26
27 The extension requires a repo (currently unused)
27 The extension requires a repo (currently unused)
28
28
29 $ hg init main
29 $ hg init main
30 $ cd main
30 $ cd main
31 $ touch a
31 $ touch a
32 $ hg add a
32 $ hg add a
33 $ hg commit -m 'a'
33 $ hg commit -m 'a'
34
34
35 $ hg unbundle $TESTDIR/bundles/rebase.hg
35 $ hg unbundle $TESTDIR/bundles/rebase.hg
36 adding changesets
36 adding changesets
37 adding manifests
37 adding manifests
38 adding file changes
38 adding file changes
39 added 8 changesets with 7 changes to 7 files (+3 heads)
39 added 8 changesets with 7 changes to 7 files (+3 heads)
40 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=unbundle HG_URL=bundle:*/rebase.hg (glob)
40 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=unbundle HG_URL=bundle:*/rebase.hg (glob)
41 (run 'hg heads' to see heads, 'hg merge' to merge)
41 (run 'hg heads' to see heads, 'hg merge' to merge)
42
42
43 $ cd ..
43 $ cd ..
44
44
45 Real world exchange
45 Real world exchange
46 =====================
46 =====================
47
47
48 Add more obsolescence information
48 Add more obsolescence information
49
49
50 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
50 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
51 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
51 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
52
52
53 clone --pull
53 clone --pull
54
54
55 $ hg -R main phase --public cd010b8cd998
55 $ hg -R main phase --public cd010b8cd998
56 $ hg clone main other --pull --rev 9520eea781bc
56 $ hg clone main other --pull --rev 9520eea781bc
57 adding changesets
57 adding changesets
58 adding manifests
58 adding manifests
59 adding file changes
59 adding file changes
60 added 2 changesets with 2 changes to 2 files
60 added 2 changesets with 2 changes to 2 files
61 1 new obsolescence markers
61 1 new obsolescence markers
62 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
62 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
63 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
63 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
64 updating to branch default
64 updating to branch default
65 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 $ hg -R other log -G
66 $ hg -R other log -G
67 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
67 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
68 |
68 |
69 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
69 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
70
70
71 $ hg -R other debugobsolete
71 $ hg -R other debugobsolete
72 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
72 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
73
73
74 pull
74 pull
75
75
76 $ hg -R main phase --public 9520eea781bc
76 $ hg -R main phase --public 9520eea781bc
77 $ hg -R other pull -r 24b6387c8c8c
77 $ hg -R other pull -r 24b6387c8c8c
78 pulling from $TESTTMP/main (glob)
78 pulling from $TESTTMP/main (glob)
79 searching for changes
79 searching for changes
80 adding changesets
80 adding changesets
81 adding manifests
81 adding manifests
82 adding file changes
82 adding file changes
83 added 1 changesets with 1 changes to 1 files (+1 heads)
83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 1 new obsolescence markers
84 1 new obsolescence markers
85 changegroup hook: HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
85 changegroup hook: HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
86 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
86 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
87 (run 'hg heads' to see heads, 'hg merge' to merge)
87 (run 'hg heads' to see heads, 'hg merge' to merge)
88 $ hg -R other log -G
88 $ hg -R other log -G
89 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
89 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
90 |
90 |
91 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
91 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
92 |/
92 |/
93 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
93 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
94
94
95 $ hg -R other debugobsolete
95 $ hg -R other debugobsolete
96 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
96 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
97 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
97 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
98
98
99 pull empty (with phase movement)
99 pull empty (with phase movement)
100
100
101 $ hg -R main phase --public 24b6387c8c8c
101 $ hg -R main phase --public 24b6387c8c8c
102 $ hg -R other pull -r 24b6387c8c8c
102 $ hg -R other pull -r 24b6387c8c8c
103 pulling from $TESTTMP/main (glob)
103 pulling from $TESTTMP/main (glob)
104 no changes found
104 no changes found
105 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
105 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
106 $ hg -R other log -G
106 $ hg -R other log -G
107 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
107 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
108 |
108 |
109 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
109 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
110 |/
110 |/
111 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
111 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
112
112
113 $ hg -R other debugobsolete
113 $ hg -R other debugobsolete
114 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
114 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
115 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
115 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
116
116
117 pull empty
117 pull empty
118
118
119 $ hg -R other pull -r 24b6387c8c8c
119 $ hg -R other pull -r 24b6387c8c8c
120 pulling from $TESTTMP/main (glob)
120 pulling from $TESTTMP/main (glob)
121 no changes found
121 no changes found
122 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
122 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
123 $ hg -R other log -G
123 $ hg -R other log -G
124 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
124 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
125 |
125 |
126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
127 |/
127 |/
128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
129
129
130 $ hg -R other debugobsolete
130 $ hg -R other debugobsolete
131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
133
133
134 add extra data to test their exchange during push
134 add extra data to test their exchange during push
135
135
136 $ hg -R main bookmark --rev eea13746799a book_eea1
136 $ hg -R main bookmark --rev eea13746799a book_eea1
137 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
137 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
138 $ hg -R main bookmark --rev 02de42196ebe book_02de
138 $ hg -R main bookmark --rev 02de42196ebe book_02de
139 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
139 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
140 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
140 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
141 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
141 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
142 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
142 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
143 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
143 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
144 $ hg -R main bookmark --rev 32af7686d403 book_32af
144 $ hg -R main bookmark --rev 32af7686d403 book_32af
145 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
145 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
146
146
147 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
147 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
148 $ hg -R other bookmark --rev cd010b8cd998 book_02de
148 $ hg -R other bookmark --rev cd010b8cd998 book_02de
149 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
149 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
150 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
150 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
151 $ hg -R other bookmark --rev cd010b8cd998 book_32af
151 $ hg -R other bookmark --rev cd010b8cd998 book_32af
152
152
153 $ hg -R main phase --public eea13746799a
153 $ hg -R main phase --public eea13746799a
154
154
155 push
155 push
156 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
156 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
157 pushing to other
157 pushing to other
158 searching for changes
158 searching for changes
159 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_SOURCE=push HG_URL=push
159 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_SOURCE=push HG_URL=push
160 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
160 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
161 remote: adding changesets
161 remote: adding changesets
162 remote: adding manifests
162 remote: adding manifests
163 remote: adding file changes
163 remote: adding file changes
164 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
164 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
165 remote: 1 new obsolescence markers
165 remote: 1 new obsolescence markers
166 updating bookmark book_eea1
166 updating bookmark book_eea1
167 $ hg -R other log -G
167 $ hg -R other log -G
168 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
168 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
169 |\
169 |\
170 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
170 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
171 | |
171 | |
172 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
172 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
173 |/
173 |/
174 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
174 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
175
175
176 $ hg -R other debugobsolete
176 $ hg -R other debugobsolete
177 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
177 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
179 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
179 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
180
180
181 pull over ssh
181 pull over ssh
182
182
183 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
183 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
184 pulling from ssh://user@dummy/main
184 pulling from ssh://user@dummy/main
185 searching for changes
185 searching for changes
186 adding changesets
186 adding changesets
187 adding manifests
187 adding manifests
188 adding file changes
188 adding file changes
189 added 1 changesets with 1 changes to 1 files (+1 heads)
189 added 1 changesets with 1 changes to 1 files (+1 heads)
190 1 new obsolescence markers
190 1 new obsolescence markers
191 updating bookmark book_02de
191 updating bookmark book_02de
192 changegroup hook: HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
192 changegroup hook: HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
193 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
193 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
194 (run 'hg heads' to see heads, 'hg merge' to merge)
194 (run 'hg heads' to see heads, 'hg merge' to merge)
195 $ hg -R other debugobsolete
195 $ hg -R other debugobsolete
196 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
196 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
197 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
197 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
198 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
198 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
199 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
199 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
200
200
201 pull over http
201 pull over http
202
202
203 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
203 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
204 $ cat main.pid >> $DAEMON_PIDS
204 $ cat main.pid >> $DAEMON_PIDS
205
205
206 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
206 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
207 pulling from http://localhost:$HGPORT/
207 pulling from http://localhost:$HGPORT/
208 searching for changes
208 searching for changes
209 adding changesets
209 adding changesets
210 adding manifests
210 adding manifests
211 adding file changes
211 adding file changes
212 added 1 changesets with 1 changes to 1 files (+1 heads)
212 added 1 changesets with 1 changes to 1 files (+1 heads)
213 1 new obsolescence markers
213 1 new obsolescence markers
214 updating bookmark book_42cc
214 updating bookmark book_42cc
215 changegroup hook: HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
215 changegroup hook: HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
216 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
216 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
217 (run 'hg heads .' to see heads, 'hg merge' to merge)
217 (run 'hg heads .' to see heads, 'hg merge' to merge)
218 $ cat main-error.log
218 $ cat main-error.log
219 $ hg -R other debugobsolete
219 $ hg -R other debugobsolete
220 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
220 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
221 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
221 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
222 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
222 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
223 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
223 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
224 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
224 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
225
225
226 push over ssh
226 push over ssh
227
227
228 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
228 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
229 pushing to ssh://user@dummy/other
229 pushing to ssh://user@dummy/other
230 searching for changes
230 searching for changes
231 remote: adding changesets
231 remote: adding changesets
232 remote: adding manifests
232 remote: adding manifests
233 remote: adding file changes
233 remote: adding file changes
234 remote: added 1 changesets with 1 changes to 1 files
234 remote: added 1 changesets with 1 changes to 1 files
235 remote: 1 new obsolescence markers
235 remote: 1 new obsolescence markers
236 updating bookmark book_5fdd
236 updating bookmark book_5fdd
237 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
237 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
238 remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
238 remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
239 $ hg -R other log -G
239 $ hg -R other log -G
240 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
240 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
241 |
241 |
242 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
242 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
243 |
243 |
244 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
244 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
245 | |
245 | |
246 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
246 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
247 | |/|
247 | |/|
248 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
248 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
249 |/ /
249 |/ /
250 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
250 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
251 |/
251 |/
252 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
252 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
253
253
254 $ hg -R other debugobsolete
254 $ hg -R other debugobsolete
255 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
255 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
256 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
256 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
257 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
257 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
258 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
258 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
259 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
259 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
260 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
260 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
261
261
262 push over http
262 push over http
263
263
264 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
264 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
265 $ cat other.pid >> $DAEMON_PIDS
265 $ cat other.pid >> $DAEMON_PIDS
266
266
267 $ hg -R main phase --public 32af7686d403
267 $ hg -R main phase --public 32af7686d403
268 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
268 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
269 pushing to http://localhost:$HGPORT2/
269 pushing to http://localhost:$HGPORT2/
270 searching for changes
270 searching for changes
271 remote: adding changesets
271 remote: adding changesets
272 remote: adding manifests
272 remote: adding manifests
273 remote: adding file changes
273 remote: adding file changes
274 remote: added 1 changesets with 1 changes to 1 files
274 remote: added 1 changesets with 1 changes to 1 files
275 remote: 1 new obsolescence markers
275 remote: 1 new obsolescence markers
276 updating bookmark book_32af
276 updating bookmark book_32af
277 $ cat other-error.log
277 $ cat other-error.log
278
278
279 Check final content.
279 Check final content.
280
280
281 $ hg -R other log -G
281 $ hg -R other log -G
282 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
282 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
283 |
283 |
284 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
284 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
285 |
285 |
286 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
286 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
287 |
287 |
288 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
288 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
289 | |
289 | |
290 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
290 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
291 | |/|
291 | |/|
292 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
292 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
293 |/ /
293 |/ /
294 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
294 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
295 |/
295 |/
296 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
296 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
297
297
298 $ hg -R other debugobsolete
298 $ hg -R other debugobsolete
299 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
299 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
301 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
301 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
302 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
302 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
303 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
303 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
304 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
304 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
305 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
305 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
306
306
307 Error Handling
307 Error Handling
308 ==============
308 ==============
309
309
310 Check that errors are properly returned to the client during push.
310 Check that errors are properly returned to the client during push.
311
311
312 Setting up
312 Setting up
313
313
314 $ cat > failpush.py << EOF
314 $ cat > failpush.py << EOF
315 > """A small extension that makes push fails when using bundle2
315 > """A small extension that makes push fails when using bundle2
316 >
316 >
317 > used to test error handling in bundle2
317 > used to test error handling in bundle2
318 > """
318 > """
319 >
319 >
320 > from mercurial import util
320 > from mercurial import util
321 > from mercurial import bundle2
321 > from mercurial import bundle2
322 > from mercurial import exchange
322 > from mercurial import exchange
323 > from mercurial import extensions
323 > from mercurial import extensions
324 >
324 >
325 > def _pushbundle2failpart(pushop, bundler):
325 > def _pushbundle2failpart(pushop, bundler):
326 > reason = pushop.ui.config('failpush', 'reason', None)
326 > reason = pushop.ui.config('failpush', 'reason', None)
327 > part = None
327 > part = None
328 > if reason == 'abort':
328 > if reason == 'abort':
329 > bundler.newpart('test:abort')
329 > bundler.newpart('test:abort')
330 > if reason == 'unknown':
330 > if reason == 'unknown':
331 > bundler.newpart('TEST:UNKNOWN')
331 > bundler.newpart('TEST:UNKNOWN')
332 > if reason == 'race':
332 > if reason == 'race':
333 > # 20 Bytes of crap
333 > # 20 Bytes of crap
334 > bundler.newpart('b2x:check:heads', data='01234567890123456789')
334 > bundler.newpart('b2x:check:heads', data='01234567890123456789')
335 >
335 >
336 > @bundle2.parthandler("test:abort")
336 > @bundle2.parthandler("test:abort")
337 > def handleabort(op, part):
337 > def handleabort(op, part):
338 > raise util.Abort('Abandon ship!', hint="don't panic")
338 > raise util.Abort('Abandon ship!', hint="don't panic")
339 >
339 >
340 > def uisetup(ui):
340 > def uisetup(ui):
341 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
341 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
342 > exchange.b2partsgenorder.insert(0, 'failpart')
342 > exchange.b2partsgenorder.insert(0, 'failpart')
343 >
343 >
344 > EOF
344 > EOF
345
345
346 $ cd main
346 $ cd main
347 $ hg up tip
347 $ hg up tip
348 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
348 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
349 $ echo 'I' > I
349 $ echo 'I' > I
350 $ hg add I
350 $ hg add I
351 $ hg ci -m 'I'
351 $ hg ci -m 'I'
352 $ hg id
352 $ hg id
353 e7ec4e813ba6 tip
353 e7ec4e813ba6 tip
354 $ cd ..
354 $ cd ..
355
355
356 $ cat << EOF >> $HGRCPATH
356 $ cat << EOF >> $HGRCPATH
357 > [extensions]
357 > [extensions]
358 > failpush=$TESTTMP/failpush.py
358 > failpush=$TESTTMP/failpush.py
359 > EOF
359 > EOF
360
360
361 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
361 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
362 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
362 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
363 $ cat other.pid >> $DAEMON_PIDS
363 $ cat other.pid >> $DAEMON_PIDS
364
364
365 Doing the actual push: Abort error
365 Doing the actual push: Abort error
366
366
367 $ cat << EOF >> $HGRCPATH
367 $ cat << EOF >> $HGRCPATH
368 > [failpush]
368 > [failpush]
369 > reason = abort
369 > reason = abort
370 > EOF
370 > EOF
371
371
372 $ hg -R main push other -r e7ec4e813ba6
372 $ hg -R main push other -r e7ec4e813ba6
373 pushing to other
373 pushing to other
374 searching for changes
374 searching for changes
375 abort: Abandon ship!
375 abort: Abandon ship!
376 (don't panic)
376 (don't panic)
377 [255]
377 [255]
378
378
379 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
379 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
380 pushing to ssh://user@dummy/other
380 pushing to ssh://user@dummy/other
381 searching for changes
381 searching for changes
382 abort: Abandon ship!
382 abort: Abandon ship!
383 (don't panic)
383 (don't panic)
384 [255]
384 [255]
385
385
386 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
386 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
387 pushing to http://localhost:$HGPORT2/
387 pushing to http://localhost:$HGPORT2/
388 searching for changes
388 searching for changes
389 abort: Abandon ship!
389 abort: Abandon ship!
390 (don't panic)
390 (don't panic)
391 [255]
391 [255]
392
392
393
393
394 Doing the actual push: unknown mandatory parts
394 Doing the actual push: unknown mandatory parts
395
395
396 $ cat << EOF >> $HGRCPATH
396 $ cat << EOF >> $HGRCPATH
397 > [failpush]
397 > [failpush]
398 > reason = unknown
398 > reason = unknown
399 > EOF
399 > EOF
400
400
401 $ hg -R main push other -r e7ec4e813ba6
401 $ hg -R main push other -r e7ec4e813ba6
402 pushing to other
402 pushing to other
403 searching for changes
403 searching for changes
404 abort: missing support for test:unknown
404 abort: missing support for test:unknown
405 [255]
405 [255]
406
406
407 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
407 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
408 pushing to ssh://user@dummy/other
408 pushing to ssh://user@dummy/other
409 searching for changes
409 searching for changes
410 abort: missing support for test:unknown
410 abort: missing support for test:unknown
411 [255]
411 [255]
412
412
413 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
413 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
414 pushing to http://localhost:$HGPORT2/
414 pushing to http://localhost:$HGPORT2/
415 searching for changes
415 searching for changes
416 abort: missing support for test:unknown
416 abort: missing support for test:unknown
417 [255]
417 [255]
418
418
419 Doing the actual push: race
419 Doing the actual push: race
420
420
421 $ cat << EOF >> $HGRCPATH
421 $ cat << EOF >> $HGRCPATH
422 > [failpush]
422 > [failpush]
423 > reason = race
423 > reason = race
424 > EOF
424 > EOF
425
425
426 $ hg -R main push other -r e7ec4e813ba6
426 $ hg -R main push other -r e7ec4e813ba6
427 pushing to other
427 pushing to other
428 searching for changes
428 searching for changes
429 abort: push failed:
429 abort: push failed:
430 'repository changed while pushing - please try again'
430 'repository changed while pushing - please try again'
431 [255]
431 [255]
432
432
433 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
433 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
434 pushing to ssh://user@dummy/other
434 pushing to ssh://user@dummy/other
435 searching for changes
435 searching for changes
436 abort: push failed:
436 abort: push failed:
437 'repository changed while pushing - please try again'
437 'repository changed while pushing - please try again'
438 [255]
438 [255]
439
439
440 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
440 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
441 pushing to http://localhost:$HGPORT2/
441 pushing to http://localhost:$HGPORT2/
442 searching for changes
442 searching for changes
443 abort: push failed:
443 abort: push failed:
444 'repository changed while pushing - please try again'
444 'repository changed while pushing - please try again'
445 [255]
445 [255]
446
446
447 Doing the actual push: hook abort
447 Doing the actual push: hook abort
448
448
449 $ cat << EOF >> $HGRCPATH
449 $ cat << EOF >> $HGRCPATH
450 > [failpush]
450 > [failpush]
451 > reason =
451 > reason =
452 > [hooks]
452 > [hooks]
453 > b2x-pretransactionclose.failpush = false
453 > b2x-pretransactionclose.failpush = false
454 > EOF
454 > EOF
455
455
456 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
456 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
457 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
457 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
458 $ cat other.pid >> $DAEMON_PIDS
458 $ cat other.pid >> $DAEMON_PIDS
459
459
460 $ hg -R main push other -r e7ec4e813ba6
460 $ hg -R main push other -r e7ec4e813ba6
461 pushing to other
461 pushing to other
462 searching for changes
462 searching for changes
463 transaction abort!
463 transaction abort!
464 rollback completed
464 rollback completed
465 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=push HG_URL=push
466 abort: b2x-pretransactionclose.failpush hook exited with status 1
465 abort: b2x-pretransactionclose.failpush hook exited with status 1
467 [255]
466 [255]
468
467
469 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
468 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
470 pushing to ssh://user@dummy/other
469 pushing to ssh://user@dummy/other
471 searching for changes
470 searching for changes
472 abort: b2x-pretransactionclose.failpush hook exited with status 1
471 abort: b2x-pretransactionclose.failpush hook exited with status 1
473 remote: transaction abort!
472 remote: transaction abort!
474 remote: rollback completed
473 remote: rollback completed
475 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
476 [255]
474 [255]
477
475
478 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
476 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
479 pushing to http://localhost:$HGPORT2/
477 pushing to http://localhost:$HGPORT2/
480 searching for changes
478 searching for changes
481 abort: b2x-pretransactionclose.failpush hook exited with status 1
479 abort: b2x-pretransactionclose.failpush hook exited with status 1
482 [255]
480 [255]
483
481
484
482
General Comments 0
You need to be logged in to leave comments. Login now