##// END OF EJS Templates
changegroup: deprecate 'getlocalchangroup' (API)...
marmoute -
r32168:4e6aab69 default
parent child Browse files
Show More
@@ -1,1026 +1,1028 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 branchmap,
23 branchmap,
24 dagutil,
24 dagutil,
25 discovery,
25 discovery,
26 error,
26 error,
27 mdiff,
27 mdiff,
28 phases,
28 phases,
29 pycompat,
29 pycompat,
30 util,
30 util,
31 )
31 )
32
32
# struct formats of the per-revision delta header, one per changegroup
# version. v1 is four 20-byte nodes (node, p1, p2, linknode); v2 adds an
# explicit 20-byte delta-base node; v3 adds an explicit big-endian marker
# and a 16-bit revlog-flags field.
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36
36
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) == n:
        return data
    # A short read means the peer hung up or the bundle is truncated.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
45
45
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # The 4-byte big-endian length prefix counts itself, so the payload
    # is length - 4 bytes.
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) can never describe a valid framed chunk.
        raise error.Abort(_("invalid chunk length %d") % length)
    # A zero length marks the end of a group.
    return ""
55
55
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-the-wire length includes the 4-byte header itself.
    return struct.pack(">l", 4 + length)
59
59
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # A zero-length chunk terminates the current group in the stream.
    return struct.pack(">l", 0)
63
63
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    # Each result encodes a head delta: 1+n means n heads were added,
    # -1-n means n heads were removed, 1 means no change, 0 means the
    # changegroup was empty / had no source.
    headdelta = 0
    combined = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            combined = 0
            break
        if ret < -1:
            headdelta += ret + 1
        elif ret > 1:
            headdelta += ret - 1
    # Re-encode the accumulated delta (this intentionally overrides the
    # 0 case when earlier groups already changed the head count).
    if headdelta > 0:
        combined = 1 + headdelta
    elif headdelta < 0:
        combined = -1 + headdelta
    return combined
82
82
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    # While 'cleanup' holds a path, the finally block deletes the
    # partially-written temp file on any failure.
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
            cleanup = filename
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Use a larger buffer than the platform default (often 4k on
            # Linux) since bundles are written in bulk.
            fh = open(filename, "wb", 131072)
        for chunk in chunks:
            fh.write(chunk)
        # Success: disarm the temp-file cleanup.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
116
116
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER  # struct format of one delta header
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        """Wrap stream ``fh``, decompressing with bundle compression ``alg``.

        ``alg`` is a bundle compression identifier ('UN', 'BZ', 'GZ', ...);
        None is treated as uncompressed.
        """
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # NOTE(review): presumably the 'BZ' magic was already consumed
            # while sniffing the bundle header (see headerlessfixup), hence
            # the special truncated-stream engine — confirm.
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # progress callback, invoked once per chunk read (see _chunklength)
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read one chunk header and return the payload length.

        Returns 0 for the empty chunk that terminates a group. Fires
        self.callback (progress reporting) for every non-empty chunk.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # the prefix counts its own 4 bytes
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            # empty chunk: end of the filelog list
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a parsed delta header tuple into its components.

        v1 carries no explicit delta base: the first chunk of a group
        deltas against p1, subsequent chunks against the previous node.
        v1 also has no revlog flags, so flags is always 0.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read the next delta chunk and return it as a dict.

        Returns {} at the end of the current delta group. ``prevnode``
        is the node of the previously read chunk (used by v1 to derive
        the implicit delta base).
        """
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """Return an iterator over all the chunks contained in the bundle.

        Used when you need to forward the binary stream to a file or another
        network API. It parses the changegroup framing as it goes; otherwise
        it would block on sshrepo-style streams, which do not signal the end
        of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-emit large chunks in 1MB slices to bound memory use
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
        yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        """Read the manifest group and add it to the repo's manifest revlog.

        ``prog`` is a progress-callback factory (see apply()); v3 overrides
        this to additionally consume directory manifest groups.
        """
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = set(cl.heads())

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                # per-step progress reporter, passed as the chunk callback
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    # collect every file touched by the incoming changesets
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    cl = repo.changelog
                    ml = repo.manifestlog
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = cl.changelogrevision(cset).manifest
                        mfest = ml[mfnode].readdelta()
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                # dh: net head delta, with branch-closing heads discounted
                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        # node/node_last were set by an outer caller; do not
                        # clobber the transaction's copy, only our local one
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
446
446
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg2 delta header tuple.

        The delta base is transmitted explicitly, so ``prevnode`` is not
        consulted; cg2 still carries no revlog flags (always 0).
        """
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
462
462
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg3 delta header: base and revlog flags are explicit."""
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        """Unpack the root manifest group, then any directory manifests.

        After the root manifests (handled by the superclass), cg3 may
        carry one group per tree-manifest directory, each prefixed with
        a filelog-style header naming the directory.
        """
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # iter() sentinel form: filelogheader() returns {} at the
        # group-terminating empty chunk, which ends the loop.
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
489
489
class headerlessfixup(object):
    """Replay already-consumed header bytes in front of a stream.

    Wraps stream ``fh`` and serves the bytes ``h`` first (presumably a
    header that was read off while identifying the stream — confirm with
    callers), then falls through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        pending = self._h
        if not pending:
            return readexactly(self._fh, n)
        # serve from the buffered header, topping up from the stream if
        # the request spans past it
        d, self._h = pending[:n], pending[n:]
        if len(d) < n:
            d += readexactly(self._fh, n - len(d))
        return d
501
501
class cg1packer(object):
    """Packer for version '01' changegroups.

    Turns a set of outgoing changesets into a stream of changegroup
    chunks: a changelog group, then a manifest group, then one group
    per touched file.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo):
        """Given a source repo, construct a bundler.
        """
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            # None means "decide per revlog" -- see _sortgroup
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        """Return the chunk that terminates a group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing a group for ``fname``."""
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        """Return the subset of ``missing`` whose linkrevs are not common."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        """Return the bytes ending the manifest section ('' for cg1)."""
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        """Yield the manifest section, walking directories breadth-first.

        ``tmfnodes`` maps directory name -> {node: linknode}; it starts with
        the root manifests and grows as treemanifest entries are discovered.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                  1) fclnodes gets populated with the list of relevant
                     file nodes if we're not using fastpathlinkrev
                  2) When treemanifests are in use, collects treemanifest nodes
                     to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield one group per changed file, pruned against commonrevs."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Return the base revision to delta against (always prev in cg1)."""
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, metadata, delta) encoding one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        """Pack the per-revision delta header for this format version."""
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
806
806
class cg2packer(cg1packer):
    """Packer for version '02' changegroups (explicit delta bases)."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo):
        super(cg2packer, self).__init__(repo)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Choose the revision ``rev`` will be delta'd against."""
        storedbase = revlog.deltaparent(rev)
        if storedbase == nullrev:
            if revlog.storedeltachains:
                # Avoid sending full revisions when delta parent is null. Pick
                # prev in that case. It's tempting to pick p1 in this case, as
                # p1 will be smaller in the common case. However, computing a
                # delta against p1 may require resolving the raw text of p1,
                # which could be expensive. The revlog caches should have prev
                # cached, meaning less CPU for changegroup generation. There is
                # likely room to add a flag and/or config option to control
                # this behavior.
                return prev
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        if storedbase in (p1, p2, prev):
            # The stored base is one the receiver is known to have.
            return storedbase
        # Pick prev when we can't be sure remote has the base revision.
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        fields = (node, p1n, p2n, basenode, linknode)
        return struct.pack(self.deltaheader, *fields)
843
843
class cg3packer(cg2packer):
    """Packer for version '03' changegroups (revlog flags, treemanifests)."""
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Emit the manifest group for one (possibly nested) directory."""
        if dir:
            # Sub-directory groups are announced with a file-style header
            # carrying the directory name.
            yield self.fileheader(dir)

        log = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, log, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        """cg3 terminates the manifest section with an explicit close chunk."""
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        """Pack the cg3 delta header, which also carries revlog flags."""
        fields = (node, p1n, p2n, basenode, linknode, flags)
        return struct.pack(self.deltaheader, *fields)
863
863
# Map of changegroup wire version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
870
870
def allsupportedversions(repo):
    """Return the set of changegroup versions usable with ``repo``.

    Version '03' is only advertised when changegroup3/treemanifest is
    explicitly enabled or the repo already requires treemanifests.
    """
    versions = set(_packermap)
    wantv03 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not wantv03:
        versions.discard('03')
    return versions
878
878
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return changegroup versions that may be applied to ``repo``.

    Currently identical to allsupportedversions(); kept as a distinct
    entry point from supportedoutgoingversions().
    """
    return allsupportedversions(repo)
882
882
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can generate."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        for flatonly in ('01', '02'):
            versions.discard(flatonly)
    return versions
895
895
def safeversion(repo):
    """Return the smallest version all clients of the repo should support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so a generaldelta repo never needs to offer 01.
    """
    usable = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        usable.discard('01')
    assert usable
    return min(usable)
905
905
def getbundler(version, repo):
    """Instantiate the packer class registered for ``version``."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo)
909
909
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker for ``version`` over the stream ``fh``."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
912
912
def _changegroupinfo(repo, nodes, source):
    """Emit user feedback about the changesets being bundled.

    Prints a count when verbose (or when writing a bundle file) and the
    full list of nodes when debugging.
    """
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))
920
920
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Generate raw changegroup chunks for ``outgoing`` via ``bundler``.

    Fires the 'preoutgoing' hook and reports progress before returning
    the bundler's chunk generator.
    """
    repo = repo.unfiltered()
    missing = outgoing.missing
    heads = outgoing.missingheads
    heads.sort()
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client). Keep the short-circuit so repo.heads() is
    # only computed when needed.
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(outgoing.common, missing, fastpathlinkrev, source)
936
936
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Bundle ``outgoing`` and wrap the result in an unbundler."""
    rawchunks = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    stream = util.chunkbuffer(rawchunks)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(bundler.version, stream, None, extras)
941
941
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    subset = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    packer = getbundler(version, repo)
    return getsubset(repo, subset, packer, source)
958
958
def getlocalchangegroupraw(repo, source, outgoing, version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    # Nothing outgoing means no changegroup at all; callers check for
    # None rather than consuming an empty stream.
    if outgoing.missing:
        bundler = getbundler(version, repo)
        return getsubsetraw(repo, outgoing, bundler, source)
    return None
968
968
def getchangegroup(repo, source, outgoing, version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    # Short-circuit when there is nothing to bundle; callers test for
    # None instead of reading an empty changegroup.
    if outgoing.missing:
        bundler = getbundler(version, repo)
        return getsubset(repo, outgoing, bundler, source)
    return None
978
978
def getlocalchangegroup(repo, *args, **kwargs):
    # Deprecated compatibility shim: formerly a bare alias
    # (getlocalchangegroup = getchangegroup); now warns once via ui and
    # forwards every argument unchanged to getchangegroup().
    repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
                       '4.3')
    return getchangegroup(repo, *args, **kwargs)
981
983
def changegroup(repo, basenodes, source):
    # Return a changegroup of everything descended from basenodes up to
    # the repository's current heads.
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
985
987
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the filelog portion of an incoming changegroup.

    Reads per-file chunk groups from ``source`` and adds them to the
    corresponding filelogs, reporting progress against
    ``expectedfiles``.  ``needfiles`` maps filename -> set of filenodes
    that the changesets being added require; each received node is
    checked off, and any surplus or missing node aborts the pull.

    Returns a ``(revisions, files)`` tuple with the number of file
    revisions and distinct files added.
    """
    revisions = 0
    files = 0
    # filelogheader() returns {} at the end of the stream, which
    # iter()'s sentinel form uses to terminate the loop.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        # Remember the pre-add length so we can count and scan only the
        # newly appended revisions below.
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            # A delta was based on a censored revision; the group is
            # unusable.
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Tick off each newly added node against the required set;
            # a node we did not ask for is a protocol error.
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles was not delivered by the
    # changegroup; verify it already exists locally, otherwise abort.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now