##// END OF EJS Templates
changegroup: fix to allow empty manifest parts...
Durham Goode -
r34093:bbdca7e4 default
parent child Browse files
Show More
@@ -1,1011 +1,1024 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available

    Unlike a bare stream.read(n), a short read (end of stream) is treated
    as a hard error rather than silently returning fewer bytes.
    '''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s
44
44
def getchunk(stream):
    """return the next chunk from stream as a string

    A chunk is a 4-byte big-endian length prefix (which includes the 4
    header bytes themselves) followed by the payload. A length of 0 is
    the end-of-group marker and yields an empty string; lengths 1-4 are
    impossible on the wire and rejected.
    """
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)
54
54
def chunkheader(length):
    """return a changegroup chunk header (string)

    The on-wire length counts the 4 header bytes as well, hence the +4.
    """
    return struct.pack(">l", length + 4)
58
58
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk

    A zero-length chunk terminates a chunk group on the wire.
    """
    return struct.pack(">l", 0)
62
62
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
            # arm cleanup: remove the temp file if writing fails below
            cleanup = filename
        for c in chunks:
            fh.write(c)
        # all chunks written successfully; disarm cleanup
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
96
96
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        # Read a 4-byte big-endian length; the value includes the header
        # itself, so 0 means "end of group" and 1-4 are invalid.
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: deltas chain against the
        # previous node in the stream, or p1 for the first one.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # a new head that closes its branch does not count as
                    # "more heads" for the user-visible summary
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret
418
431
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries the delta base explicitly in the header, so
        # prevnode is ignored here.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
434
447
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers already carry everything, including revlog flags.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
461
474
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Used when some leading bytes (h) were read off fh before it was
    handed to a consumer: read() serves the buffered header first, then
    falls through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                # header exhausted mid-request; top up from the stream
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
473
486
class cg1packer(object):
    """Packer producing version '01' changegroups.

    Emits the changelog, then the (flat) manifest log, then one part per
    changed file, each part being a sequence of delta chunks terminated by
    an empty chunk (see close()).
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            # None means "decide per revlog" (see _sortgroup below).
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            # Quiet mode: size notes become no-ops.
            self._verbosenote = lambda s: None

    def close(self):
        """Return the empty chunk that terminates a delta group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the chunk that introduces the part for file ``fname``."""
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev; it anchors the first delta and is
        # never emitted itself (the receiver already has it, or it's nullrev)
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        """Return the subset of ``missing`` whose linkrev is outside
        ``commonrevs`` (i.e. actually needs to be transferred)."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        # cg1 only knows flat manifests, so a non-empty directory is a bug.
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        """Terminator emitted after all manifests; nothing extra for cg1."""
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        """Yield the manifest section of the changegroup.

        ``mfs`` maps root manifest node -> linkrev node; ``fnodes`` is
        populated as a side effect (file node -> linkrev node) for use by
        generatefiles() on the slow path.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        # Work queue of directory -> {node: linkrev node}; starts with the
        # root manifests and grows as treemanifest entries are discovered.
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            # keep the earliest introducing changeset
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            # The root part ('') is emitted even when empty so the stream
            # always contains a manifest section.
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield one changegroup part per changed file (sorted by name)."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Choose the revision to delta against; cg1 always uses prev."""
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunk(s) encoding one revision: length header, delta
        header (+ optional diff-header prefix), then the delta body."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                # ship the tombstone instead of the censored content
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # full snapshot, framed as a trivial diff against nothing
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
787
800
class cg2packer(cg1packer):
    """Packer producing version '02' changegroups.

    Same stream layout as cg1, but each delta header carries an explicit
    delta-base node, so generaldelta storage chains can be reused.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Pick the delta base for ``rev``, preferring the stored one."""
        storagebase = revlog.deltaparent(rev)
        if storagebase == nullrev:
            if not revlog.storedeltachains:
                # revlog is configured to use full snapshot for a reason,
                # stick to full snapshot.
                return nullrev
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        if storagebase in (p1, p2, prev):
            # The remote is guaranteed to have one of these revisions.
            return storagebase
        # Pick prev when we can't be sure remote has the base revision.
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        fields = (node, p1n, p2n, basenode, linknode)
        return struct.pack(self.deltaheader, *fields)
824
837
class cg3packer(cg2packer):
    """Packer producing version '03' changegroups.

    Extends cg2 with revlog flags in the delta header and with parts for
    treemanifest sub-directory logs.
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # A sub-directory manifest part is introduced by a header naming the
        # directory, just like a file part; the root manifest has no header.
        if dir:
            yield self.fileheader(dir)

        revlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, revlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an explicit empty chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Unlike cg1/cg2, cg3 transmits the revlog flags on the wire.
        fields = (node, p1n, p2n, basenode, linknode, flags)
        return struct.pack(self.deltaheader, *fields)
844
857
# Maps changegroup wire-format version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }
851
864
def allsupportedversions(repo):
    """Return the set of changegroup versions this code knows about.

    '03' is only advertised when treemanifests are in use or one of the
    experimental opt-in config knobs is set.
    """
    versions = set(_packermap.keys())
    needv03 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not needv03:
        versions.discard('03')
    return versions
859
872
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can receive.

    Currently identical to allsupportedversions(); kept separate so the
    incoming and outgoing sets can diverge independently.
    """
    return allsupportedversions(repo)
863
876
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can produce."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update({'01', '02'})
    return versions
876
889
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    # supportedoutgoingversions() leaves at least one version in the set and
    # discarding '01' alone cannot empty it, so min() below is safe.
    assert versions
    return min(versions)
886
899
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class for ``version`` against ``repo``."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
890
903
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker class for ``version`` over stream ``fh``."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
893
906
def _changegroupinfo(repo, nodes, source):
    """Report how many (and, when debugging, which) changesets are bundled."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
901
914
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Fire outgoing hooks and return a raw changegroup chunk generator."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    allrepoheads = (repo.filtername is None
                    and heads == sorted(repo.heads()))
    fastpathlinkrev = fastpath or allrepoheads

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
917
930
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw(), but wrap the raw stream in an unbundler."""
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        extras)
922
935
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    # Let discovery compute the missing set from the roots/heads envelope.
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
939
952
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    Only implemented for local repos; reuses any precomputed sets on
    ``outgoing``.  Returns a raw changegroup chunk generator, or None
    when there is nothing to send.
    """
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)
950
963
def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    Only implemented for local repos; reuses any precomputed sets on
    ``outgoing``.  Returns None when there is nothing to send.
    """
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)
961
974
def getlocalchangegroup(repo, *args, **kwargs):
    """Deprecated alias for getchangegroup()."""
    msg = 'getlocalchangegroup is deprecated, use getchangegroup'
    repo.ui.deprecwarn(msg, '4.3')
    return getchangegroup(repo, *args, **kwargs)
966
979
def changegroup(repo, basenodes, source):
    """Return a changegroup of everything between ``basenodes`` and the
    current repository heads.

    Delegates to changegroupsubset() so head computation and bundle
    generation happen under a single call, avoiding the race described
    in issue1320.
    """
    return changegroupsubset(repo, basenodes, repo.heads(), source)
970
983
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Read the filelog section of a changegroup and apply it to repo.

    source is the changegroup unbundler, positioned at the start of the
    filelog section; revmap and trp are passed straight through to
    revlog addgroup() (revmap presumably resolves nodes to linkrevs and
    trp is the transaction being written under -- confirm against the
    callers); expectedfiles is used only for progress reporting;
    needfiles maps filename -> set of filenodes that the incoming
    changesets require and that must therefore be accounted for.

    Returns a (revisions, files) tuple counting what was added.  Raises
    error.Abort on an empty group, a censored delta base, a file node no
    changeset asked for, or a needed node missing at the end.
    """
    revisions = 0
    files = 0
    # iter() with the {} sentinel stops at the empty chunk terminating
    # the filelog section of the stream.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        # Remember the pre-group length so we can count and inspect only
        # the newly appended revisions below.
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Check off each newly added node; a node no changeset needs
            # is treated as a corrupt/spurious stream.
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles did not arrive in the stream;
    # that is only acceptable if the repository already has the node.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now