##// END OF EJS Templates
bundle-ng: move progress handling out of the linkrev callback
Benoit Boissinot -
r19208:0b564cf3 default
parent child Browse files
Show More
@@ -1,420 +1,415 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import nullrev, hex
9 from node import nullrev, hex
10 import mdiff, util, dagutil
10 import mdiff, util, dagutil
11 import struct, os, bz2, zlib, tempfile
11 import struct, os, bz2, zlib, tempfile
12
12
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14
14
15 def readexactly(stream, n):
15 def readexactly(stream, n):
16 '''read n bytes from stream.read and abort if less was available'''
16 '''read n bytes from stream.read and abort if less was available'''
17 s = stream.read(n)
17 s = stream.read(n)
18 if len(s) < n:
18 if len(s) < n:
19 raise util.Abort(_("stream ended unexpectedly"
19 raise util.Abort(_("stream ended unexpectedly"
20 " (got %d bytes, expected %d)")
20 " (got %d bytes, expected %d)")
21 % (len(s), n))
21 % (len(s), n))
22 return s
22 return s
23
23
24 def getchunk(stream):
24 def getchunk(stream):
25 """return the next chunk from stream as a string"""
25 """return the next chunk from stream as a string"""
26 d = readexactly(stream, 4)
26 d = readexactly(stream, 4)
27 l = struct.unpack(">l", d)[0]
27 l = struct.unpack(">l", d)[0]
28 if l <= 4:
28 if l <= 4:
29 if l:
29 if l:
30 raise util.Abort(_("invalid chunk length %d") % l)
30 raise util.Abort(_("invalid chunk length %d") % l)
31 return ""
31 return ""
32 return readexactly(stream, l - 4)
32 return readexactly(stream, l - 4)
33
33
34 def chunkheader(length):
34 def chunkheader(length):
35 """return a changegroup chunk header (string)"""
35 """return a changegroup chunk header (string)"""
36 return struct.pack(">l", length + 4)
36 return struct.pack(">l", length + 4)
37
37
38 def closechunk():
38 def closechunk():
39 """return a changegroup chunk header (string) for a zero-length chunk"""
39 """return a changegroup chunk header (string) for a zero-length chunk"""
40 return struct.pack(">l", 0)
40 return struct.pack(">l", 0)
41
41
42 class nocompress(object):
42 class nocompress(object):
43 def compress(self, x):
43 def compress(self, x):
44 return x
44 return x
45 def flush(self):
45 def flush(self):
46 return ""
46 return ""
47
47
48 bundletypes = {
48 bundletypes = {
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
50 # since the unification ssh accepts a header but there
50 # since the unification ssh accepts a header but there
51 # is no capability signaling it.
51 # is no capability signaling it.
52 "HG10UN": ("HG10UN", nocompress),
52 "HG10UN": ("HG10UN", nocompress),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
55 }
55 }
56
56
57 # hgweb uses this list to communicate its preferred type
57 # hgweb uses this list to communicate its preferred type
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59
59
60 def writebundle(cg, filename, bundletype):
60 def writebundle(cg, filename, bundletype):
61 """Write a bundle file and return its filename.
61 """Write a bundle file and return its filename.
62
62
63 Existing files will not be overwritten.
63 Existing files will not be overwritten.
64 If no filename is specified, a temporary file is created.
64 If no filename is specified, a temporary file is created.
65 bz2 compression can be turned off.
65 bz2 compression can be turned off.
66 The bundle file will be deleted in case of errors.
66 The bundle file will be deleted in case of errors.
67 """
67 """
68
68
69 fh = None
69 fh = None
70 cleanup = None
70 cleanup = None
71 try:
71 try:
72 if filename:
72 if filename:
73 fh = open(filename, "wb")
73 fh = open(filename, "wb")
74 else:
74 else:
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
76 fh = os.fdopen(fd, "wb")
76 fh = os.fdopen(fd, "wb")
77 cleanup = filename
77 cleanup = filename
78
78
79 header, compressor = bundletypes[bundletype]
79 header, compressor = bundletypes[bundletype]
80 fh.write(header)
80 fh.write(header)
81 z = compressor()
81 z = compressor()
82
82
83 # parse the changegroup data, otherwise we will block
83 # parse the changegroup data, otherwise we will block
84 # in case of sshrepo because we don't know the end of the stream
84 # in case of sshrepo because we don't know the end of the stream
85
85
86 # an empty chunkgroup is the end of the changegroup
86 # an empty chunkgroup is the end of the changegroup
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
88 # after that, an empty chunkgroup is the end of the changegroup
88 # after that, an empty chunkgroup is the end of the changegroup
89 empty = False
89 empty = False
90 count = 0
90 count = 0
91 while not empty or count <= 2:
91 while not empty or count <= 2:
92 empty = True
92 empty = True
93 count += 1
93 count += 1
94 while True:
94 while True:
95 chunk = getchunk(cg)
95 chunk = getchunk(cg)
96 if not chunk:
96 if not chunk:
97 break
97 break
98 empty = False
98 empty = False
99 fh.write(z.compress(chunkheader(len(chunk))))
99 fh.write(z.compress(chunkheader(len(chunk))))
100 pos = 0
100 pos = 0
101 while pos < len(chunk):
101 while pos < len(chunk):
102 next = pos + 2**20
102 next = pos + 2**20
103 fh.write(z.compress(chunk[pos:next]))
103 fh.write(z.compress(chunk[pos:next]))
104 pos = next
104 pos = next
105 fh.write(z.compress(closechunk()))
105 fh.write(z.compress(closechunk()))
106 fh.write(z.flush())
106 fh.write(z.flush())
107 cleanup = None
107 cleanup = None
108 return filename
108 return filename
109 finally:
109 finally:
110 if fh is not None:
110 if fh is not None:
111 fh.close()
111 fh.close()
112 if cleanup is not None:
112 if cleanup is not None:
113 os.unlink(cleanup)
113 os.unlink(cleanup)
114
114
115 def decompressor(fh, alg):
115 def decompressor(fh, alg):
116 if alg == 'UN':
116 if alg == 'UN':
117 return fh
117 return fh
118 elif alg == 'GZ':
118 elif alg == 'GZ':
119 def generator(f):
119 def generator(f):
120 zd = zlib.decompressobj()
120 zd = zlib.decompressobj()
121 for chunk in util.filechunkiter(f):
121 for chunk in util.filechunkiter(f):
122 yield zd.decompress(chunk)
122 yield zd.decompress(chunk)
123 elif alg == 'BZ':
123 elif alg == 'BZ':
124 def generator(f):
124 def generator(f):
125 zd = bz2.BZ2Decompressor()
125 zd = bz2.BZ2Decompressor()
126 zd.decompress("BZ")
126 zd.decompress("BZ")
127 for chunk in util.filechunkiter(f, 4096):
127 for chunk in util.filechunkiter(f, 4096):
128 yield zd.decompress(chunk)
128 yield zd.decompress(chunk)
129 else:
129 else:
130 raise util.Abort("unknown bundle compression '%s'" % alg)
130 raise util.Abort("unknown bundle compression '%s'" % alg)
131 return util.chunkbuffer(generator(fh))
131 return util.chunkbuffer(generator(fh))
132
132
133 class unbundle10(object):
133 class unbundle10(object):
134 deltaheader = _BUNDLE10_DELTA_HEADER
134 deltaheader = _BUNDLE10_DELTA_HEADER
135 deltaheadersize = struct.calcsize(deltaheader)
135 deltaheadersize = struct.calcsize(deltaheader)
136 def __init__(self, fh, alg):
136 def __init__(self, fh, alg):
137 self._stream = decompressor(fh, alg)
137 self._stream = decompressor(fh, alg)
138 self._type = alg
138 self._type = alg
139 self.callback = None
139 self.callback = None
140 def compressed(self):
140 def compressed(self):
141 return self._type != 'UN'
141 return self._type != 'UN'
142 def read(self, l):
142 def read(self, l):
143 return self._stream.read(l)
143 return self._stream.read(l)
144 def seek(self, pos):
144 def seek(self, pos):
145 return self._stream.seek(pos)
145 return self._stream.seek(pos)
146 def tell(self):
146 def tell(self):
147 return self._stream.tell()
147 return self._stream.tell()
148 def close(self):
148 def close(self):
149 return self._stream.close()
149 return self._stream.close()
150
150
151 def chunklength(self):
151 def chunklength(self):
152 d = readexactly(self._stream, 4)
152 d = readexactly(self._stream, 4)
153 l = struct.unpack(">l", d)[0]
153 l = struct.unpack(">l", d)[0]
154 if l <= 4:
154 if l <= 4:
155 if l:
155 if l:
156 raise util.Abort(_("invalid chunk length %d") % l)
156 raise util.Abort(_("invalid chunk length %d") % l)
157 return 0
157 return 0
158 if self.callback:
158 if self.callback:
159 self.callback()
159 self.callback()
160 return l - 4
160 return l - 4
161
161
162 def changelogheader(self):
162 def changelogheader(self):
163 """v10 does not have a changelog header chunk"""
163 """v10 does not have a changelog header chunk"""
164 return {}
164 return {}
165
165
166 def manifestheader(self):
166 def manifestheader(self):
167 """v10 does not have a manifest header chunk"""
167 """v10 does not have a manifest header chunk"""
168 return {}
168 return {}
169
169
170 def filelogheader(self):
170 def filelogheader(self):
171 """return the header of the filelogs chunk, v10 only has the filename"""
171 """return the header of the filelogs chunk, v10 only has the filename"""
172 l = self.chunklength()
172 l = self.chunklength()
173 if not l:
173 if not l:
174 return {}
174 return {}
175 fname = readexactly(self._stream, l)
175 fname = readexactly(self._stream, l)
176 return dict(filename=fname)
176 return dict(filename=fname)
177
177
178 def _deltaheader(self, headertuple, prevnode):
178 def _deltaheader(self, headertuple, prevnode):
179 node, p1, p2, cs = headertuple
179 node, p1, p2, cs = headertuple
180 if prevnode is None:
180 if prevnode is None:
181 deltabase = p1
181 deltabase = p1
182 else:
182 else:
183 deltabase = prevnode
183 deltabase = prevnode
184 return node, p1, p2, deltabase, cs
184 return node, p1, p2, deltabase, cs
185
185
186 def deltachunk(self, prevnode):
186 def deltachunk(self, prevnode):
187 l = self.chunklength()
187 l = self.chunklength()
188 if not l:
188 if not l:
189 return {}
189 return {}
190 headerdata = readexactly(self._stream, self.deltaheadersize)
190 headerdata = readexactly(self._stream, self.deltaheadersize)
191 header = struct.unpack(self.deltaheader, headerdata)
191 header = struct.unpack(self.deltaheader, headerdata)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
195 deltabase=deltabase, delta=delta)
195 deltabase=deltabase, delta=delta)
196
196
197 class headerlessfixup(object):
197 class headerlessfixup(object):
198 def __init__(self, fh, h):
198 def __init__(self, fh, h):
199 self._h = h
199 self._h = h
200 self._fh = fh
200 self._fh = fh
201 def read(self, n):
201 def read(self, n):
202 if self._h:
202 if self._h:
203 d, self._h = self._h[:n], self._h[n:]
203 d, self._h = self._h[:n], self._h[n:]
204 if len(d) < n:
204 if len(d) < n:
205 d += readexactly(self._fh, n - len(d))
205 d += readexactly(self._fh, n - len(d))
206 return d
206 return d
207 return readexactly(self._fh, n)
207 return readexactly(self._fh, n)
208
208
209 def readbundle(fh, fname):
209 def readbundle(fh, fname):
210 header = readexactly(fh, 6)
210 header = readexactly(fh, 6)
211
211
212 if not fname:
212 if not fname:
213 fname = "stream"
213 fname = "stream"
214 if not header.startswith('HG') and header.startswith('\0'):
214 if not header.startswith('HG') and header.startswith('\0'):
215 fh = headerlessfixup(fh, header)
215 fh = headerlessfixup(fh, header)
216 header = "HG10UN"
216 header = "HG10UN"
217
217
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
219
219
220 if magic != 'HG':
220 if magic != 'HG':
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
222 if version != '10':
222 if version != '10':
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
224 return unbundle10(fh, alg)
224 return unbundle10(fh, alg)
225
225
226 class bundle10(object):
226 class bundle10(object):
227 deltaheader = _BUNDLE10_DELTA_HEADER
227 deltaheader = _BUNDLE10_DELTA_HEADER
228 def __init__(self, repo, bundlecaps=None):
228 def __init__(self, repo, bundlecaps=None):
229 """Given a source repo, construct a bundler.
229 """Given a source repo, construct a bundler.
230
230
231 bundlecaps is optional and can be used to specify the set of
231 bundlecaps is optional and can be used to specify the set of
232 capabilities which can be used to build the bundle.
232 capabilities which can be used to build the bundle.
233 """
233 """
234 # Set of capabilities we can use to build the bundle.
234 # Set of capabilities we can use to build the bundle.
235 if bundlecaps is None:
235 if bundlecaps is None:
236 bundlecaps = set()
236 bundlecaps = set()
237 self._bundlecaps = bundlecaps
237 self._bundlecaps = bundlecaps
238 self._changelog = repo.changelog
238 self._changelog = repo.changelog
239 self._manifest = repo.manifest
239 self._manifest = repo.manifest
240 reorder = repo.ui.config('bundle', 'reorder', 'auto')
240 reorder = repo.ui.config('bundle', 'reorder', 'auto')
241 if reorder == 'auto':
241 if reorder == 'auto':
242 reorder = None
242 reorder = None
243 else:
243 else:
244 reorder = util.parsebool(reorder)
244 reorder = util.parsebool(reorder)
245 self._repo = repo
245 self._repo = repo
246 self._reorder = reorder
246 self._reorder = reorder
247 self._progress = repo.ui.progress
247 def close(self):
248 def close(self):
248 return closechunk()
249 return closechunk()
249
250
250 def fileheader(self, fname):
251 def fileheader(self, fname):
251 return chunkheader(len(fname)) + fname
252 return chunkheader(len(fname)) + fname
252
253
253 def group(self, nodelist, revlog, lookup, reorder=None):
254 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
254 """Calculate a delta group, yielding a sequence of changegroup chunks
255 """Calculate a delta group, yielding a sequence of changegroup chunks
255 (strings).
256 (strings).
256
257
257 Given a list of changeset revs, return a set of deltas and
258 Given a list of changeset revs, return a set of deltas and
258 metadata corresponding to nodes. The first delta is
259 metadata corresponding to nodes. The first delta is
259 first parent(nodelist[0]) -> nodelist[0], the receiver is
260 first parent(nodelist[0]) -> nodelist[0], the receiver is
260 guaranteed to have this parent as it has all history before
261 guaranteed to have this parent as it has all history before
261 these changesets. In the case firstparent is nullrev the
262 these changesets. In the case firstparent is nullrev the
262 changegroup starts with a full revision.
263 changegroup starts with a full revision.
264
265 If units is not None, progress detail will be generated; units specifies
266 the type of revlog that is touched (changelog, manifest, etc.).
263 """
267 """
264
265 # if we don't have any revisions touched by these changesets, bail
268 # if we don't have any revisions touched by these changesets, bail
266 if len(nodelist) == 0:
269 if len(nodelist) == 0:
267 yield self.close()
270 yield self.close()
268 return
271 return
269
272
270 # for generaldelta revlogs, we linearize the revs; this will both be
273 # for generaldelta revlogs, we linearize the revs; this will both be
271 # much quicker and generate a much smaller bundle
274 # much quicker and generate a much smaller bundle
272 if (revlog._generaldelta and reorder is not False) or reorder:
275 if (revlog._generaldelta and reorder is not False) or reorder:
273 dag = dagutil.revlogdag(revlog)
276 dag = dagutil.revlogdag(revlog)
274 revs = set(revlog.rev(n) for n in nodelist)
277 revs = set(revlog.rev(n) for n in nodelist)
275 revs = dag.linearize(revs)
278 revs = dag.linearize(revs)
276 else:
279 else:
277 revs = sorted([revlog.rev(n) for n in nodelist])
280 revs = sorted([revlog.rev(n) for n in nodelist])
278
281
279 # add the parent of the first rev
282 # add the parent of the first rev
280 p = revlog.parentrevs(revs[0])[0]
283 p = revlog.parentrevs(revs[0])[0]
281 revs.insert(0, p)
284 revs.insert(0, p)
282
285
283 # build deltas
286 # build deltas
287 total = len(revs) - 1
288 msgbundling = _('bundling')
284 for r in xrange(len(revs) - 1):
289 for r in xrange(len(revs) - 1):
290 if units is not None:
291 self._progress(msgbundling, r + 1, unit=units, total=total)
285 prev, curr = revs[r], revs[r + 1]
292 prev, curr = revs[r], revs[r + 1]
286 linknode = lookup(revlog.node(curr))
293 linknode = lookup(revlog.node(curr))
287 for c in self.revchunk(revlog, curr, prev, linknode):
294 for c in self.revchunk(revlog, curr, prev, linknode):
288 yield c
295 yield c
289
296
290 yield self.close()
297 yield self.close()
291
298
292 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
299 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
293 '''yield a sequence of changegroup chunks (strings)'''
300 '''yield a sequence of changegroup chunks (strings)'''
294 repo = self._repo
301 repo = self._repo
295 cl = self._changelog
302 cl = self._changelog
296 mf = self._manifest
303 mf = self._manifest
297 reorder = self._reorder
304 reorder = self._reorder
298 progress = repo.ui.progress
305 progress = self._progress
299 # Keep track of progress, this is a list since it is modified by revlog
306
300 # callbacks. First item is the number of items done, second is the
307 # for progress output
301 # total number to be processed.
308 msgbundling = _('bundling')
302 count = [0, 0]
303 _bundling = _('bundling')
304 _changesets = _('changesets')
305 _manifests = _('manifests')
306 _files = _('files')
307
309
308 mfs = {} # needed manifests
310 mfs = {} # needed manifests
309 fnodes = {} # needed file nodes
311 fnodes = {} # needed file nodes
310 changedfiles = set()
312 changedfiles = set()
311
313
312 # filter any nodes that claim to be part of the known set
314 # filter any nodes that claim to be part of the known set
313 def prune(revlog, missing):
315 def prune(revlog, missing):
314 rr, rl = revlog.rev, revlog.linkrev
316 rr, rl = revlog.rev, revlog.linkrev
315 return [n for n in missing
317 return [n for n in missing if rl(rr(n)) not in commonrevs]
316 if rl(rr(n)) not in commonrevs]
317
318
318 # Callback for the changelog, used to collect changed files and manifest
319 # Callback for the changelog, used to collect changed files and manifest
319 # nodes.
320 # nodes.
320 # Returns the linkrev node (identity in the changelog case).
321 # Returns the linkrev node (identity in the changelog case).
321 def lookupcl(x):
322 def lookupcl(x):
322 c = cl.read(x)
323 c = cl.read(x)
323 changedfiles.update(c[3])
324 changedfiles.update(c[3])
324 # record the first changeset introducing this manifest version
325 # record the first changeset introducing this manifest version
325 mfs.setdefault(c[0], x)
326 mfs.setdefault(c[0], x)
326 count[0] += 1
327 progress(_bundling, count[0],
328 unit=_changesets, total=count[1])
329 return x
327 return x
330
328
331 # Callback for the manifest, used to collect linkrevs for filelog
329 # Callback for the manifest, used to collect linkrevs for filelog
332 # revisions.
330 # revisions.
333 # Returns the linkrev node (collected in lookupcl).
331 # Returns the linkrev node (collected in lookupcl).
334 def lookupmf(x):
332 def lookupmf(x):
335 clnode = mfs[x]
333 clnode = mfs[x]
336 if not fastpathlinkrev:
334 if not fastpathlinkrev:
337 mdata = mf.readfast(x)
335 mdata = mf.readfast(x)
338 for f, n in mdata.iteritems():
336 for f, n in mdata.iteritems():
339 if f in changedfiles:
337 if f in changedfiles:
340 # record the first changeset introducing this filelog
338 # record the first changeset introducing this filelog
341 # version
339 # version
342 fnodes[f].setdefault(n, clnode)
340 fnodes[f].setdefault(n, clnode)
343 count[0] += 1
344 progress(_bundling, count[0],
345 unit=_manifests, total=count[1])
346 return clnode
341 return clnode
347
342
348 count[:] = [0, len(clnodes)]
343 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
349 for chunk in self.group(clnodes, cl, lookupcl, reorder=reorder):
344 reorder=reorder):
350 yield chunk
345 yield chunk
351 progress(_bundling, None)
346 progress(msgbundling, None)
352
347
353 for f in changedfiles:
348 for f in changedfiles:
354 fnodes[f] = {}
349 fnodes[f] = {}
355 count[:] = [0, len(mfs)]
356 mfnodes = prune(mf, mfs)
350 mfnodes = prune(mf, mfs)
357 for chunk in self.group(mfnodes, mf, lookupmf, reorder=reorder):
351 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
352 reorder=reorder):
358 yield chunk
353 yield chunk
359 progress(_bundling, None)
354 progress(msgbundling, None)
360
355
361 mfs.clear()
356 mfs.clear()
362 count[:] = [0, len(changedfiles)]
357 total = len(changedfiles)
363 for fname in sorted(changedfiles):
358 # for progress output
359 msgfiles = _('files')
360 for i, fname in enumerate(sorted(changedfiles)):
364 filerevlog = repo.file(fname)
361 filerevlog = repo.file(fname)
365 if not len(filerevlog):
362 if not len(filerevlog):
366 raise util.Abort(_("empty or missing revlog for %s")
363 raise util.Abort(_("empty or missing revlog for %s") % fname)
367 % fname)
368
364
369 if fastpathlinkrev:
365 if fastpathlinkrev:
370 ln, llr = filerevlog.node, filerevlog.linkrev
366 ln, llr = filerevlog.node, filerevlog.linkrev
371 def genfilenodes():
367 def genfilenodes():
372 for r in filerevlog:
368 for r in filerevlog:
373 linkrev = llr(r)
369 linkrev = llr(r)
374 if linkrev not in commonrevs:
370 if linkrev not in commonrevs:
375 yield filerevlog.node(r), cl.node(linkrev)
371 yield filerevlog.node(r), cl.node(linkrev)
376 fnodes[fname] = dict(genfilenodes())
372 fnodes[fname] = dict(genfilenodes())
377
373
378 linkrevnodes = fnodes.pop(fname, {})
374 linkrevnodes = fnodes.pop(fname, {})
379 # Lookup for filenodes, we collected the linkrev nodes above in the
375 # Lookup for filenodes, we collected the linkrev nodes above in the
380 # fastpath case and with lookupmf in the slowpath case.
376 # fastpath case and with lookupmf in the slowpath case.
381 def lookupfilelog(x):
377 def lookupfilelog(x):
382 progress(_bundling, count[0], item=fname,
383 unit=_files, total=count[1])
384 return linkrevnodes[x]
378 return linkrevnodes[x]
385
379
386 filenodes = prune(filerevlog, linkrevnodes)
380 filenodes = prune(filerevlog, linkrevnodes)
387 if filenodes:
381 if filenodes:
388 count[0] += 1
382 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
383 total=total)
389 yield self.fileheader(fname)
384 yield self.fileheader(fname)
390 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
385 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
391 reorder):
386 reorder=reorder):
392 yield chunk
387 yield chunk
393 yield self.close()
388 yield self.close()
394 progress(_bundling, None)
389 progress(msgbundling, None)
395
390
396 if clnodes:
391 if clnodes:
397 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
392 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
398
393
399 def revchunk(self, revlog, rev, prev, linknode):
394 def revchunk(self, revlog, rev, prev, linknode):
400 node = revlog.node(rev)
395 node = revlog.node(rev)
401 p1, p2 = revlog.parentrevs(rev)
396 p1, p2 = revlog.parentrevs(rev)
402 base = prev
397 base = prev
403
398
404 prefix = ''
399 prefix = ''
405 if base == nullrev:
400 if base == nullrev:
406 delta = revlog.revision(node)
401 delta = revlog.revision(node)
407 prefix = mdiff.trivialdiffheader(len(delta))
402 prefix = mdiff.trivialdiffheader(len(delta))
408 else:
403 else:
409 delta = revlog.revdiff(base, rev)
404 delta = revlog.revdiff(base, rev)
410 p1n, p2n = revlog.parents(node)
405 p1n, p2n = revlog.parents(node)
411 basenode = revlog.node(base)
406 basenode = revlog.node(base)
412 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
407 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
413 meta += prefix
408 meta += prefix
414 l = len(meta) + len(delta)
409 l = len(meta) + len(delta)
415 yield chunkheader(l)
410 yield chunkheader(l)
416 yield meta
411 yield meta
417 yield delta
412 yield delta
418 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
413 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
419 # do nothing with basenode, it is implicitly the previous one in HG10
414 # do nothing with basenode, it is implicitly the previous one in HG10
420 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
415 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
General Comments 0
You need to be logged in to leave comments. Login now