##// END OF EJS Templates
changegroup: delete "if True" and reflow
Martin von Zweigbergk -
r32931:b08431e1 default
parent child Browse files
Show More
@@ -1,1027 +1,1025 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
def readexactly(stream, n):
    """Read exactly n bytes from stream.read; abort on a short read."""
    data = stream.read(n)
    if len(data) < n:
        # A short read means the peer hung up or the bundle is truncated.
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(data), n))
    return data
def getchunk(stream):
    """return the next chunk from stream as a string"""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        # The stored length includes the 4-byte header itself.
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) cannot describe a valid chunk.
        raise error.Abort(_("invalid chunk length %d") % length)
    # A zero-length word is the group terminator.
    return ""
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-wire length counts the 4 header bytes as well as the payload.
    return struct.pack(">l", 4 + length)
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # A zero length word (rather than the usual length + 4) marks the end
    # of a chunk group; see getchunk()/getchunks() which treat it as the
    # terminator.
    return struct.pack(">l", 0)
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    # Each result encodes a head-count delta: 1+n for n added heads,
    # -1-n for n removed heads, 1 for no change, 0 for "nothing changed".
    headdelta = 0
    for ret in results:
        # A single zero result makes the combined result zero.
        if ret == 0:
            return 0
        if ret > 1:
            headdelta += ret - 1
        elif ret < -1:
            headdelta += ret + 1
    if headdelta > 0:
        return 1 + headdelta
    if headdelta < 0:
        return -1 + headdelta
    return 1
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Increase default buffer size because default is usually
            # small (4k is common on Linux).
            fh = open(filename, "wb", 131072)
        # From here on a partially written file must be removed on error.
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        # fh is the (possibly compressed) input stream; alg names the
        # bundle compression type, validated against util.compengines.
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # NOTE(review): 'BZ' is remapped to '_truncatedBZ' -
            # presumably because the type prefix has already been
            # consumed from the stream; confirm in util.compengines.
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Progress callback invoked once per chunk read; see _chunklength.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk header; return the payload length (0 ends a group)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header; the delta base is implicit (prevnode,
        or p1 for the first chunk of a group)."""
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit large chunks in 1MB slices.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, tr, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup', throw=True, **tr.hookargs)

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not (cgnodes or emptyok):
                raise error.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate', default=False):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # Heads that close a branch do not count as added heads.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    # 'node' already set by an earlier changegroup in the
                    # same transaction: keep tr.hookargs untouched and only
                    # override the local copy passed to the hook.
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup', throw=True, **hookargs)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    phases.advanceboundary(repo, tr, phases.public, cgnodes)
                else:
                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    phases.advanceboundary(repo, tr, phases.draft, cgnodes)
                    phases.retractboundary(repo, tr, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(repo, tr, targetphase, added)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **hookargs)

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **args)

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            return deltaheads - 1
        else:
            return deltaheads + 1
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The delta base is explicit on the wire in cg2, so prevnode is
        # unused; cg2 has no revlog flags, hence the constant 0.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries the full (node, p1, p2, deltabase, cs, flags)
        # tuple on the wire, so nothing needs to be derived here.
        return headertuple

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # Any filelog-style groups appearing before the empty separator
        # chunk describe directory manifests.
        for chunkdata in iter(self.filelogheader, {}):
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            dirlog = repo.manifestlog._revlog.dirlog(dirname)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
class headerlessfixup(object):
    # Wrap a stream whose leading bytes `h` were already consumed by the
    # caller (e.g. to sniff the format) and replay them ahead of the
    # remaining data in `fh`.
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        # Serve from the replay buffer first, topping up from the
        # underlying stream if the buffer runs short.
        data, self._h = buffered[:n], buffered[n:]
        if len(data) < n:
            data += readexactly(self._fh, n - len(data))
        return data
class cg1packer(object):
    """Packer producing version '01' changegroup streams.

    Emits changelog, manifest, then filelog delta groups as a sequence of
    length-prefixed chunks (see chunkheader/closechunk). Subclasses override
    the delta header format, delta-parent selection, and manifest packing to
    implement newer changegroup versions.
    """
    # struct format of the per-revision delta header for this version
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # 'auto' maps to None (let revlog properties decide, see _sortgroup);
        # anything else is parsed as an explicit boolean.
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # a zero-length chunk terminates a delta group
        return closechunk()

    def fileheader(self, fname):
        # chunk announcing which file (or directory) the next group is for
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg1 has no terminator after the manifest section
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        """Yield the manifest section of the changegroup.

        Walks the (possibly tree) manifest structure rooted at the flat
        manifest nodes in *mfs*; as a side effect populates *fnodes* with
        the file nodes the file section will need.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the filelog section of the changegroup."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 can only delta against the previous revision in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, metadata, delta) encoding one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            # censored revisions are sent as full texts (or tombstones)
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # full snapshot, framed as a trivial diff against nothing
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
803
801
class cg2packer(cg1packer):
    """Packer producing version '02' changegroup streams.

    cg2 supports generaldelta: the delta base is transmitted explicitly in
    the header instead of being implicitly the previous stream revision.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # Pick the stored delta base when we can trust the receiver has it;
        # otherwise fall back to prev (or a full snapshot).
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
840
838
class cg3packer(cg2packer):
    """Packer producing version '03' changegroup streams.

    cg3 adds revlog flags to the delta header and supports tree manifests
    (one sub-group per directory, announced by a fileheader chunk).
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # Non-root directories are announced like files; the root manifest
        # group carries no header.
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an empty chunk
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
860
858
# Maps changegroup version string -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
}
867
865
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo can deal with."""
    versions = set(_packermap)
    # '03' is only offered when something signals interest in cg3 or
    # tree manifests (experimental configs or a repo requirement).
    wantcg3 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not wantcg3:
        versions.discard('03')
    return versions
875
873
def supportedincomingversions(repo):
    """Changegroup versions that can be applied to the repo."""
    return allsupportedversions(repo)
879
877
def supportedoutgoingversions(repo):
    """Changegroup versions that can be created from the repo."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update(['01', '02'])
    return versions
892
890
def safeversion(repo):
    """Return the smallest version it's safe to assume clients support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
902
900
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the changegroup packer for the given version."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
906
904
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the changegroup unpacker for the given version."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
909
907
def _changegroupinfo(repo, nodes, source):
    """Report the changeset count (and, with --debug, each node)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
917
915
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Generate a raw changegroup chunk stream for the outgoing set.

    Fires the 'preoutgoing' hook (which may abort) before generation.
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will be
    # pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
933
931
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw, but wrap the chunk stream in an unbundler object."""
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})
938
936
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Build a changegroup covering roots' descendants that are heads' ancestors.

    Returns a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is fairly complex: determining which filenodes and which manifest
    nodes must be included for the changesets to be complete is non-trivial,
    as is the reverse mapping (which changeset in the changegroup a given
    filenode or manifestnode belongs to).
    """
    bundler = getbundler(version, repo)
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    return getsubset(repo, outgoing, bundler, source)
955
953
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    # Nothing to send: signal that with None rather than an empty stream.
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubsetraw(repo, outgoing, bundler, source)
    return None
966
964
def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    # Nothing to send: signal that with None rather than an empty bundle.
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubset(repo, outgoing, bundler, source)
    return None
977
975
def getlocalchangegroup(repo, *args, **kwargs):
    """Deprecated alias for getchangegroup(); warns, then delegates."""
    msg = 'getlocalchangegroup is deprecated, use getchangegroup'
    repo.ui.deprecwarn(msg, '4.3')
    return getchangegroup(repo, *args, **kwargs)
982
980
def changegroup(repo, basenodes, source):
    """Return a changegroup of all nodes from basenodes to the repo heads.

    Delegates to changegroupsubset() instead of computing the set here,
    to avoid a race (issue1320).
    """
    heads = repo.heads()
    return changegroupsubset(repo, basenodes, heads, source)
986
984
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the filelog portion of a changegroup from ``source``.

    For every filelog header in the stream, add the corresponding delta
    group to the repo's filelog, ticking off nodes from ``needfiles`` as
    they arrive.  Afterwards, verify that every still-needed node already
    exists locally.  Returns a (revisions added, files touched) pair.
    """
    revcount = 0
    fcount = 0
    # iter(callable, sentinel): filelogheader() returns {} at end of stream
    for chunkdata in iter(source.filelogheader, {}):
        fcount += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), fcount, unit=_('files'),
                         total=expectedfiles)
        flog = repo.file(fname)
        oldlen = len(flog)
        try:
            if not flog.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revcount += len(flog) - oldlen
        if fname in needfiles:
            wanted = needfiles[fname]
            # Every newly added node must be one we were expecting.
            for rev in xrange(oldlen, len(flog)):
                node = flog.node(rev)
                if node not in wanted:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                wanted.remove(node)
            if not wanted:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles must already be present locally.
    for fname, wanted in needfiles.iteritems():
        flog = repo.file(fname)
        for node in wanted:
            try:
                flog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revcount, fcount
General Comments 0
You need to be logged in to leave comments. Login now