##// END OF EJS Templates
bundle-ng: simplify lookup and state handling...
Benoit Boissinot -
r19207:a67e1380 default
parent child Browse files
Show More
@@ -1,408 +1,420 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import nullrev, hex
9 from node import nullrev, hex
10 import mdiff, util, dagutil
10 import mdiff, util, dagutil
11 import struct, os, bz2, zlib, tempfile
11 import struct, os, bz2, zlib, tempfile
12
12
# struct format of a bundle10 delta chunk header: four 20-byte binary
# SHA-1 hashes (node, p1, p2, linknode) -- see builddeltaheader below.
_BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14
14
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    got = len(data)
    if got < n:
        # a short read means the stream was truncated; surface it loudly
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (got, n))
    return data
23
23
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # the 4-byte big-endian length field counts itself, so a payload-less
    # chunk has length 4 and a length of 0 marks the end of a chunk group
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        if length:
            raise util.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
33
33
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-the-wire length field includes its own four bytes
    return struct.pack(">l", 4 + length)
37
37
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero length field terminates the current chunk group on the wire
    return struct.pack(">l", 0)
41
41
class nocompress(object):
    """Compressor-shaped no-op used for uncompressed bundle types."""
    def compress(self, x):
        # identity: return the input unchanged
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
47
47
# Map of bundle type name -> (header string written to the file/wire,
# compressor factory).  Note "HG10BZ" writes only "HG10": bz2 output
# itself starts with the bytes "BZ", completing the 6-byte header (the
# 'BZ' branch of decompressor below re-feeds those bytes accordingly).
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59
59
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    cg is a changegroup-like stream readable with getchunk();
    bundletype is a key of the bundletypes table above.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    # 'cleanup' holds the filename to unlink on failure; it is reset to
    # None once the bundle was written completely
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                # compress in 1 MiB slices to bound memory use
                # (renamed from 'next', which shadowed the builtin)
                while pos < len(chunk):
                    end = pos + 2**20
                    fh.write(z.compress(chunk[pos:end]))
                    pos = end
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
114
114
def decompressor(fh, alg):
    """Return a file-like object yielding the decompressed content of fh.

    alg is the two-letter compression tag from the bundle header:
    'UN' (none), 'GZ' (zlib) or 'BZ' (bz2).  Aborts on anything else.
    """
    if alg == 'UN':
        # no compression: hand the raw stream straight back
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the "BZ" magic was consumed as part of the 6-byte bundle
            # header; feed it back so the decompressor sees a well-formed
            # bz2 stream
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    # wrap the chosen generator so callers get a read()-able object
    return util.chunkbuffer(generator(fh))
132
132
class unbundle10(object):
    """Reader for a version 10 (HG10) changegroup stream.

    Wraps a (possibly compressed) stream and exposes chunk-level
    accessors used by the pull/unbundle machinery.
    """
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        # alg is the two-letter compression tag from the bundle header
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional progress callback, invoked once per non-empty chunk
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        """read a chunk length field; return the payload size (0 at end)"""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # the stored length includes the 4 length bytes themselves
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return dict(filename=fname)

    def _deltaheader(self, headertuple, prevnode):
        # in v10 the delta base is implicit: the previously sent node,
        # or p1 for the first chunk of a group
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """read one delta chunk; return {} at the end of the chunk group"""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return dict(node=node, p1=p1, p2=p2, cs=cs,
                    deltabase=deltabase, delta=delta)
196
196
class headerlessfixup(object):
    """Wrap a stream whose first bytes were already consumed.

    Reads are served from the saved header bytes first, then fall
    through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            # header fully drained: read straight from the stream
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # header exhausted mid-request; top up from the stream
            d += readexactly(self._fh, n - len(d))
        return d
208
208
def readbundle(fh, fname):
    """Sniff the 6-byte bundle header from fh and return an unbundle10.

    fname is used only for error messages ("stream" when empty).
    Aborts if the data is not a version 10 Mercurial bundle.
    """
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # headerless stream: the six bytes we consumed are changegroup
        # data, so push them back and assume an uncompressed HG10 bundle
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    # e.g. "HG10BZ" -> magic "HG", version "10", compression "BZ"
    magic, version, alg = header[0:2], header[2:4], header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
225
225
class bundle10(object):
    """Generator of version 10 (HG10) changegroup streams."""
    deltaheader = _BUNDLE10_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # 'bundle.reorder' config: 'auto' defers to per-revlog heuristics
        # in group(); anything else is parsed as a hard boolean override
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
    def close(self):
        # terminator chunk ending the current chunk group
        return closechunk()

    def fileheader(self, fname):
        # a filelog group is introduced by a chunk holding just the filename
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        lookup is a callable mapping a node of revlog to its linkrev node.
        """

        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas: each rev is delta'd against its predecessor in revs
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = repo.ui.progress
        # Keep track of progress, this is a list since it is modified by revlog
        # callbacks. First item is the number of items done, second is the
        # total number to be processed.
        count = [0, 0]
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_changesets, total=count[1])
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_manifests, total=count[1])
            return clnode

        count[:] = [0, len(clnodes)]
        for chunk in self.group(clnodes, cl, lookupcl, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        count[:] = [0, len(mfs)]
        mfnodes = prune(mf, mfs)
        for chunk in self.group(mfnodes, mf, lookupmf, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        mfs.clear()
        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = repo.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)

            if fastpathlinkrev:
                # NOTE(review): 'ln' appears unused (genfilenodes calls
                # filerevlog.node directly) -- confirm before removing
                ln, llr = filerevlog.node, filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev not in commonrevs:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())

            linkrevnodes = fnodes.pop(fname, {})
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                progress(_bundling, count[0], item=fname,
                         unit=_files, total=count[1])
                return linkrevnodes[x]

            filenodes = prune(filerevlog, linkrevnodes)
            if filenodes:
                count[0] += 1
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder):
                    yield chunk
        yield self.close()
        progress(_bundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def revchunk(self, revlog, rev, prev, linknode):
        """yield the chunk pieces (header, meta, delta) for one revision,
        delta'd against prev (the previously emitted rev)"""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            # no base available: send a full text disguised as a trivial diff
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
General Comments 0
You need to be logged in to leave comments. Login now