##// END OF EJS Templates
changegroup: use progress helper in apply() (API)...
Martin von Zweigbergk -
r38365:83534c4e default
parent child Browse files
Show More
@@ -1,1022 +1,1016 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullrev,
17 nullrev,
18 short,
18 short,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 dagutil,
22 dagutil,
23 error,
23 error,
24 mdiff,
24 mdiff,
25 phases,
25 phases,
26 pycompat,
26 pycompat,
27 util,
27 util,
28 )
28 )
29
29
30 from .utils import (
30 from .utils import (
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
35 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
36 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
37
37
38 LFS_REQUIREMENT = 'lfs'
38 LFS_REQUIREMENT = 'lfs'
39
39
40 # When narrowing is finalized and no longer subject to format changes,
40 # When narrowing is finalized and no longer subject to format changes,
41 # we should move this to just "narrow" or similar.
41 # we should move this to just "narrow" or similar.
42 NARROW_REQUIREMENT = 'narrowhg-experimental'
42 NARROW_REQUIREMENT = 'narrowhg-experimental'
43
43
44 readexactly = util.readexactly
44 readexactly = util.readexactly
45
45
46 def getchunk(stream):
46 def getchunk(stream):
47 """return the next chunk from stream as a string"""
47 """return the next chunk from stream as a string"""
48 d = readexactly(stream, 4)
48 d = readexactly(stream, 4)
49 l = struct.unpack(">l", d)[0]
49 l = struct.unpack(">l", d)[0]
50 if l <= 4:
50 if l <= 4:
51 if l:
51 if l:
52 raise error.Abort(_("invalid chunk length %d") % l)
52 raise error.Abort(_("invalid chunk length %d") % l)
53 return ""
53 return ""
54 return readexactly(stream, l - 4)
54 return readexactly(stream, l - 4)
55
55
56 def chunkheader(length):
56 def chunkheader(length):
57 """return a changegroup chunk header (string)"""
57 """return a changegroup chunk header (string)"""
58 return struct.pack(">l", length + 4)
58 return struct.pack(">l", length + 4)
59
59
60 def closechunk():
60 def closechunk():
61 """return a changegroup chunk header (string) for a zero-length chunk"""
61 """return a changegroup chunk header (string) for a zero-length chunk"""
62 return struct.pack(">l", 0)
62 return struct.pack(">l", 0)
63
63
64 def writechunks(ui, chunks, filename, vfs=None):
64 def writechunks(ui, chunks, filename, vfs=None):
65 """Write chunks to a file and return its filename.
65 """Write chunks to a file and return its filename.
66
66
67 The stream is assumed to be a bundle file.
67 The stream is assumed to be a bundle file.
68 Existing files will not be overwritten.
68 Existing files will not be overwritten.
69 If no filename is specified, a temporary file is created.
69 If no filename is specified, a temporary file is created.
70 """
70 """
71 fh = None
71 fh = None
72 cleanup = None
72 cleanup = None
73 try:
73 try:
74 if filename:
74 if filename:
75 if vfs:
75 if vfs:
76 fh = vfs.open(filename, "wb")
76 fh = vfs.open(filename, "wb")
77 else:
77 else:
78 # Increase default buffer size because default is usually
78 # Increase default buffer size because default is usually
79 # small (4k is common on Linux).
79 # small (4k is common on Linux).
80 fh = open(filename, "wb", 131072)
80 fh = open(filename, "wb", 131072)
81 else:
81 else:
82 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
83 fh = os.fdopen(fd, r"wb")
83 fh = os.fdopen(fd, r"wb")
84 cleanup = filename
84 cleanup = filename
85 for c in chunks:
85 for c in chunks:
86 fh.write(c)
86 fh.write(c)
87 cleanup = None
87 cleanup = None
88 return filename
88 return filename
89 finally:
89 finally:
90 if fh is not None:
90 if fh is not None:
91 fh.close()
91 fh.close()
92 if cleanup is not None:
92 if cleanup is not None:
93 if filename and vfs:
93 if filename and vfs:
94 vfs.unlink(cleanup)
94 vfs.unlink(cleanup)
95 else:
95 else:
96 os.unlink(cleanup)
96 os.unlink(cleanup)
97
97
98 class cg1unpacker(object):
98 class cg1unpacker(object):
99 """Unpacker for cg1 changegroup streams.
99 """Unpacker for cg1 changegroup streams.
100
100
101 A changegroup unpacker handles the framing of the revision data in
101 A changegroup unpacker handles the framing of the revision data in
102 the wire format. Most consumers will want to use the apply()
102 the wire format. Most consumers will want to use the apply()
103 method to add the changes from the changegroup to a repository.
103 method to add the changes from the changegroup to a repository.
104
104
105 If you're forwarding a changegroup unmodified to another consumer,
105 If you're forwarding a changegroup unmodified to another consumer,
106 use getchunks(), which returns an iterator of changegroup
106 use getchunks(), which returns an iterator of changegroup
107 chunks. This is mostly useful for cases where you need to know the
107 chunks. This is mostly useful for cases where you need to know the
108 data stream has ended by observing the end of the changegroup.
108 data stream has ended by observing the end of the changegroup.
109
109
110 deltachunk() is useful only if you're applying delta data. Most
110 deltachunk() is useful only if you're applying delta data. Most
111 consumers should prefer apply() instead.
111 consumers should prefer apply() instead.
112
112
113 A few other public methods exist. Those are used only for
113 A few other public methods exist. Those are used only for
114 bundlerepo and some debug commands - their use is discouraged.
114 bundlerepo and some debug commands - their use is discouraged.
115 """
115 """
116 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
116 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
117 deltaheadersize = struct.calcsize(deltaheader)
117 deltaheadersize = struct.calcsize(deltaheader)
118 version = '01'
118 version = '01'
119 _grouplistcount = 1 # One list of files after the manifests
119 _grouplistcount = 1 # One list of files after the manifests
120
120
121 def __init__(self, fh, alg, extras=None):
121 def __init__(self, fh, alg, extras=None):
122 if alg is None:
122 if alg is None:
123 alg = 'UN'
123 alg = 'UN'
124 if alg not in util.compengines.supportedbundletypes:
124 if alg not in util.compengines.supportedbundletypes:
125 raise error.Abort(_('unknown stream compression type: %s')
125 raise error.Abort(_('unknown stream compression type: %s')
126 % alg)
126 % alg)
127 if alg == 'BZ':
127 if alg == 'BZ':
128 alg = '_truncatedBZ'
128 alg = '_truncatedBZ'
129
129
130 compengine = util.compengines.forbundletype(alg)
130 compengine = util.compengines.forbundletype(alg)
131 self._stream = compengine.decompressorreader(fh)
131 self._stream = compengine.decompressorreader(fh)
132 self._type = alg
132 self._type = alg
133 self.extras = extras or {}
133 self.extras = extras or {}
134 self.callback = None
134 self.callback = None
135
135
136 # These methods (compressed, read, seek, tell) all appear to only
136 # These methods (compressed, read, seek, tell) all appear to only
137 # be used by bundlerepo, but it's a little hard to tell.
137 # be used by bundlerepo, but it's a little hard to tell.
138 def compressed(self):
138 def compressed(self):
139 return self._type is not None and self._type != 'UN'
139 return self._type is not None and self._type != 'UN'
140 def read(self, l):
140 def read(self, l):
141 return self._stream.read(l)
141 return self._stream.read(l)
142 def seek(self, pos):
142 def seek(self, pos):
143 return self._stream.seek(pos)
143 return self._stream.seek(pos)
144 def tell(self):
144 def tell(self):
145 return self._stream.tell()
145 return self._stream.tell()
146 def close(self):
146 def close(self):
147 return self._stream.close()
147 return self._stream.close()
148
148
149 def _chunklength(self):
149 def _chunklength(self):
150 d = readexactly(self._stream, 4)
150 d = readexactly(self._stream, 4)
151 l = struct.unpack(">l", d)[0]
151 l = struct.unpack(">l", d)[0]
152 if l <= 4:
152 if l <= 4:
153 if l:
153 if l:
154 raise error.Abort(_("invalid chunk length %d") % l)
154 raise error.Abort(_("invalid chunk length %d") % l)
155 return 0
155 return 0
156 if self.callback:
156 if self.callback:
157 self.callback()
157 self.callback()
158 return l - 4
158 return l - 4
159
159
160 def changelogheader(self):
160 def changelogheader(self):
161 """v10 does not have a changelog header chunk"""
161 """v10 does not have a changelog header chunk"""
162 return {}
162 return {}
163
163
164 def manifestheader(self):
164 def manifestheader(self):
165 """v10 does not have a manifest header chunk"""
165 """v10 does not have a manifest header chunk"""
166 return {}
166 return {}
167
167
168 def filelogheader(self):
168 def filelogheader(self):
169 """return the header of the filelogs chunk, v10 only has the filename"""
169 """return the header of the filelogs chunk, v10 only has the filename"""
170 l = self._chunklength()
170 l = self._chunklength()
171 if not l:
171 if not l:
172 return {}
172 return {}
173 fname = readexactly(self._stream, l)
173 fname = readexactly(self._stream, l)
174 return {'filename': fname}
174 return {'filename': fname}
175
175
176 def _deltaheader(self, headertuple, prevnode):
176 def _deltaheader(self, headertuple, prevnode):
177 node, p1, p2, cs = headertuple
177 node, p1, p2, cs = headertuple
178 if prevnode is None:
178 if prevnode is None:
179 deltabase = p1
179 deltabase = p1
180 else:
180 else:
181 deltabase = prevnode
181 deltabase = prevnode
182 flags = 0
182 flags = 0
183 return node, p1, p2, deltabase, cs, flags
183 return node, p1, p2, deltabase, cs, flags
184
184
185 def deltachunk(self, prevnode):
185 def deltachunk(self, prevnode):
186 l = self._chunklength()
186 l = self._chunklength()
187 if not l:
187 if not l:
188 return {}
188 return {}
189 headerdata = readexactly(self._stream, self.deltaheadersize)
189 headerdata = readexactly(self._stream, self.deltaheadersize)
190 header = struct.unpack(self.deltaheader, headerdata)
190 header = struct.unpack(self.deltaheader, headerdata)
191 delta = readexactly(self._stream, l - self.deltaheadersize)
191 delta = readexactly(self._stream, l - self.deltaheadersize)
192 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
193 return (node, p1, p2, cs, deltabase, delta, flags)
193 return (node, p1, p2, cs, deltabase, delta, flags)
194
194
195 def getchunks(self):
195 def getchunks(self):
196 """returns all the chunks contains in the bundle
196 """returns all the chunks contains in the bundle
197
197
198 Used when you need to forward the binary stream to a file or another
198 Used when you need to forward the binary stream to a file or another
199 network API. To do so, it parse the changegroup data, otherwise it will
199 network API. To do so, it parse the changegroup data, otherwise it will
200 block in case of sshrepo because it don't know the end of the stream.
200 block in case of sshrepo because it don't know the end of the stream.
201 """
201 """
202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
205 # filelogs.
205 # filelogs.
206 #
206 #
207 # Changelog and manifestlog parts are terminated with empty chunks. The
207 # Changelog and manifestlog parts are terminated with empty chunks. The
208 # tree and file parts are a list of entry sections. Each entry section
208 # tree and file parts are a list of entry sections. Each entry section
209 # is a series of chunks terminating in an empty chunk. The list of these
209 # is a series of chunks terminating in an empty chunk. The list of these
210 # entry sections is terminated in yet another empty chunk, so we know
210 # entry sections is terminated in yet another empty chunk, so we know
211 # we've reached the end of the tree/file list when we reach an empty
211 # we've reached the end of the tree/file list when we reach an empty
212 # chunk that was proceeded by no non-empty chunks.
212 # chunk that was proceeded by no non-empty chunks.
213
213
214 parts = 0
214 parts = 0
215 while parts < 2 + self._grouplistcount:
215 while parts < 2 + self._grouplistcount:
216 noentries = True
216 noentries = True
217 while True:
217 while True:
218 chunk = getchunk(self)
218 chunk = getchunk(self)
219 if not chunk:
219 if not chunk:
220 # The first two empty chunks represent the end of the
220 # The first two empty chunks represent the end of the
221 # changelog and the manifestlog portions. The remaining
221 # changelog and the manifestlog portions. The remaining
222 # empty chunks represent either A) the end of individual
222 # empty chunks represent either A) the end of individual
223 # tree or file entries in the file list, or B) the end of
223 # tree or file entries in the file list, or B) the end of
224 # the entire list. It's the end of the entire list if there
224 # the entire list. It's the end of the entire list if there
225 # were no entries (i.e. noentries is True).
225 # were no entries (i.e. noentries is True).
226 if parts < 2:
226 if parts < 2:
227 parts += 1
227 parts += 1
228 elif noentries:
228 elif noentries:
229 parts += 1
229 parts += 1
230 break
230 break
231 noentries = False
231 noentries = False
232 yield chunkheader(len(chunk))
232 yield chunkheader(len(chunk))
233 pos = 0
233 pos = 0
234 while pos < len(chunk):
234 while pos < len(chunk):
235 next = pos + 2**20
235 next = pos + 2**20
236 yield chunk[pos:next]
236 yield chunk[pos:next]
237 pos = next
237 pos = next
238 yield closechunk()
238 yield closechunk()
239
239
240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
240 def _unpackmanifests(self, repo, revmap, trp, prog):
241 # We know that we'll never have more manifests than we had
241 self.callback = prog.increment
242 # changesets.
243 self.callback = prog(_('manifests'), numchanges)
244 # no need to check for empty manifest group here:
242 # no need to check for empty manifest group here:
245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
243 # if the result of the merge of 1 and 2 is the same in 3 and 4,
246 # no new manifest will be created and the manifest group will
244 # no new manifest will be created and the manifest group will
247 # be empty during the pull
245 # be empty during the pull
248 self.manifestheader()
246 self.manifestheader()
249 deltas = self.deltaiter()
247 deltas = self.deltaiter()
250 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
248 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
251 repo.ui.progress(_('manifests'), None)
249 prog.update(None)
252 self.callback = None
250 self.callback = None
253
251
254 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
252 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
255 expectedtotal=None):
253 expectedtotal=None):
256 """Add the changegroup returned by source.read() to this repo.
254 """Add the changegroup returned by source.read() to this repo.
257 srctype is a string like 'push', 'pull', or 'unbundle'. url is
255 srctype is a string like 'push', 'pull', or 'unbundle'. url is
258 the URL of the repo where this changegroup is coming from.
256 the URL of the repo where this changegroup is coming from.
259
257
260 Return an integer summarizing the change to this repo:
258 Return an integer summarizing the change to this repo:
261 - nothing changed or no source: 0
259 - nothing changed or no source: 0
262 - more heads than before: 1+added heads (2..n)
260 - more heads than before: 1+added heads (2..n)
263 - fewer heads than before: -1-removed heads (-2..-n)
261 - fewer heads than before: -1-removed heads (-2..-n)
264 - number of heads stays the same: 1
262 - number of heads stays the same: 1
265 """
263 """
266 repo = repo.unfiltered()
264 repo = repo.unfiltered()
267 def csmap(x):
265 def csmap(x):
268 repo.ui.debug("add changeset %s\n" % short(x))
266 repo.ui.debug("add changeset %s\n" % short(x))
269 return len(cl)
267 return len(cl)
270
268
271 def revmap(x):
269 def revmap(x):
272 return cl.rev(x)
270 return cl.rev(x)
273
271
274 changesets = files = revisions = 0
272 changesets = files = revisions = 0
275
273
276 try:
274 try:
277 # The transaction may already carry source information. In this
275 # The transaction may already carry source information. In this
278 # case we use the top level data. We overwrite the argument
276 # case we use the top level data. We overwrite the argument
279 # because we need to use the top level value (if they exist)
277 # because we need to use the top level value (if they exist)
280 # in this function.
278 # in this function.
281 srctype = tr.hookargs.setdefault('source', srctype)
279 srctype = tr.hookargs.setdefault('source', srctype)
282 url = tr.hookargs.setdefault('url', url)
280 url = tr.hookargs.setdefault('url', url)
283 repo.hook('prechangegroup',
281 repo.hook('prechangegroup',
284 throw=True, **pycompat.strkwargs(tr.hookargs))
282 throw=True, **pycompat.strkwargs(tr.hookargs))
285
283
286 # write changelog data to temp files so concurrent readers
284 # write changelog data to temp files so concurrent readers
287 # will not see an inconsistent view
285 # will not see an inconsistent view
288 cl = repo.changelog
286 cl = repo.changelog
289 cl.delayupdate(tr)
287 cl.delayupdate(tr)
290 oldheads = set(cl.heads())
288 oldheads = set(cl.heads())
291
289
292 trp = weakref.proxy(tr)
290 trp = weakref.proxy(tr)
293 # pull off the changeset group
291 # pull off the changeset group
294 repo.ui.status(_("adding changesets\n"))
292 repo.ui.status(_("adding changesets\n"))
295 clstart = len(cl)
293 clstart = len(cl)
296 class prog(object):
294 progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
297 def __init__(self, step, total):
295 total=expectedtotal)
298 self._step = step
296 self.callback = progress.increment
299 self._total = total
300 self._count = 1
301 def __call__(self):
302 repo.ui.progress(self._step, self._count, unit=_('chunks'),
303 total=self._total)
304 self._count += 1
305 self.callback = prog(_('changesets'), expectedtotal)
306
297
307 efiles = set()
298 efiles = set()
308 def onchangelog(cl, node):
299 def onchangelog(cl, node):
309 efiles.update(cl.readfiles(node))
300 efiles.update(cl.readfiles(node))
310
301
311 self.changelogheader()
302 self.changelogheader()
312 deltas = self.deltaiter()
303 deltas = self.deltaiter()
313 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
304 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
314 efiles = len(efiles)
305 efiles = len(efiles)
315
306
316 if not cgnodes:
307 if not cgnodes:
317 repo.ui.develwarn('applied empty changegroup',
308 repo.ui.develwarn('applied empty changegroup',
318 config='warn-empty-changegroup')
309 config='warn-empty-changegroup')
319 clend = len(cl)
310 clend = len(cl)
320 changesets = clend - clstart
311 changesets = clend - clstart
321 repo.ui.progress(_('changesets'), None)
312 progress.update(None)
322 self.callback = None
313 self.callback = None
323
314
324 # pull off the manifest group
315 # pull off the manifest group
325 repo.ui.status(_("adding manifests\n"))
316 repo.ui.status(_("adding manifests\n"))
326 self._unpackmanifests(repo, revmap, trp, prog, changesets)
317 # We know that we'll never have more manifests than we had
318 # changesets.
319 progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
320 total=changesets)
321 self._unpackmanifests(repo, revmap, trp, progress)
327
322
328 needfiles = {}
323 needfiles = {}
329 if repo.ui.configbool('server', 'validate'):
324 if repo.ui.configbool('server', 'validate'):
330 cl = repo.changelog
325 cl = repo.changelog
331 ml = repo.manifestlog
326 ml = repo.manifestlog
332 # validate incoming csets have their manifests
327 # validate incoming csets have their manifests
333 for cset in xrange(clstart, clend):
328 for cset in xrange(clstart, clend):
334 mfnode = cl.changelogrevision(cset).manifest
329 mfnode = cl.changelogrevision(cset).manifest
335 mfest = ml[mfnode].readdelta()
330 mfest = ml[mfnode].readdelta()
336 # store file cgnodes we must see
331 # store file cgnodes we must see
337 for f, n in mfest.iteritems():
332 for f, n in mfest.iteritems():
338 needfiles.setdefault(f, set()).add(n)
333 needfiles.setdefault(f, set()).add(n)
339
334
340 # process the files
335 # process the files
341 repo.ui.status(_("adding file changes\n"))
336 repo.ui.status(_("adding file changes\n"))
342 newrevs, newfiles = _addchangegroupfiles(
337 newrevs, newfiles = _addchangegroupfiles(
343 repo, self, revmap, trp, efiles, needfiles)
338 repo, self, revmap, trp, efiles, needfiles)
344 revisions += newrevs
339 revisions += newrevs
345 files += newfiles
340 files += newfiles
346
341
347 deltaheads = 0
342 deltaheads = 0
348 if oldheads:
343 if oldheads:
349 heads = cl.heads()
344 heads = cl.heads()
350 deltaheads = len(heads) - len(oldheads)
345 deltaheads = len(heads) - len(oldheads)
351 for h in heads:
346 for h in heads:
352 if h not in oldheads and repo[h].closesbranch():
347 if h not in oldheads and repo[h].closesbranch():
353 deltaheads -= 1
348 deltaheads -= 1
354 htext = ""
349 htext = ""
355 if deltaheads:
350 if deltaheads:
356 htext = _(" (%+d heads)") % deltaheads
351 htext = _(" (%+d heads)") % deltaheads
357
352
358 repo.ui.status(_("added %d changesets"
353 repo.ui.status(_("added %d changesets"
359 " with %d changes to %d files%s\n")
354 " with %d changes to %d files%s\n")
360 % (changesets, revisions, files, htext))
355 % (changesets, revisions, files, htext))
361 repo.invalidatevolatilesets()
356 repo.invalidatevolatilesets()
362
357
363 if changesets > 0:
358 if changesets > 0:
364 if 'node' not in tr.hookargs:
359 if 'node' not in tr.hookargs:
365 tr.hookargs['node'] = hex(cl.node(clstart))
360 tr.hookargs['node'] = hex(cl.node(clstart))
366 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
361 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
367 hookargs = dict(tr.hookargs)
362 hookargs = dict(tr.hookargs)
368 else:
363 else:
369 hookargs = dict(tr.hookargs)
364 hookargs = dict(tr.hookargs)
370 hookargs['node'] = hex(cl.node(clstart))
365 hookargs['node'] = hex(cl.node(clstart))
371 hookargs['node_last'] = hex(cl.node(clend - 1))
366 hookargs['node_last'] = hex(cl.node(clend - 1))
372 repo.hook('pretxnchangegroup',
367 repo.hook('pretxnchangegroup',
373 throw=True, **pycompat.strkwargs(hookargs))
368 throw=True, **pycompat.strkwargs(hookargs))
374
369
375 added = [cl.node(r) for r in xrange(clstart, clend)]
370 added = [cl.node(r) for r in xrange(clstart, clend)]
376 phaseall = None
371 phaseall = None
377 if srctype in ('push', 'serve'):
372 if srctype in ('push', 'serve'):
378 # Old servers can not push the boundary themselves.
373 # Old servers can not push the boundary themselves.
379 # New servers won't push the boundary if changeset already
374 # New servers won't push the boundary if changeset already
380 # exists locally as secret
375 # exists locally as secret
381 #
376 #
382 # We should not use added here but the list of all change in
377 # We should not use added here but the list of all change in
383 # the bundle
378 # the bundle
384 if repo.publishing():
379 if repo.publishing():
385 targetphase = phaseall = phases.public
380 targetphase = phaseall = phases.public
386 else:
381 else:
387 # closer target phase computation
382 # closer target phase computation
388
383
389 # Those changesets have been pushed from the
384 # Those changesets have been pushed from the
390 # outside, their phases are going to be pushed
385 # outside, their phases are going to be pushed
391 # alongside. Therefor `targetphase` is
386 # alongside. Therefor `targetphase` is
392 # ignored.
387 # ignored.
393 targetphase = phaseall = phases.draft
388 targetphase = phaseall = phases.draft
394 if added:
389 if added:
395 phases.registernew(repo, tr, targetphase, added)
390 phases.registernew(repo, tr, targetphase, added)
396 if phaseall is not None:
391 if phaseall is not None:
397 phases.advanceboundary(repo, tr, phaseall, cgnodes)
392 phases.advanceboundary(repo, tr, phaseall, cgnodes)
398
393
399 if changesets > 0:
394 if changesets > 0:
400
395
401 def runhooks():
396 def runhooks():
402 # These hooks run when the lock releases, not when the
397 # These hooks run when the lock releases, not when the
403 # transaction closes. So it's possible for the changelog
398 # transaction closes. So it's possible for the changelog
404 # to have changed since we last saw it.
399 # to have changed since we last saw it.
405 if clstart >= len(repo):
400 if clstart >= len(repo):
406 return
401 return
407
402
408 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
403 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
409
404
410 for n in added:
405 for n in added:
411 args = hookargs.copy()
406 args = hookargs.copy()
412 args['node'] = hex(n)
407 args['node'] = hex(n)
413 del args['node_last']
408 del args['node_last']
414 repo.hook("incoming", **pycompat.strkwargs(args))
409 repo.hook("incoming", **pycompat.strkwargs(args))
415
410
416 newheads = [h for h in repo.heads()
411 newheads = [h for h in repo.heads()
417 if h not in oldheads]
412 if h not in oldheads]
418 repo.ui.log("incoming",
413 repo.ui.log("incoming",
419 "%d incoming changes - new heads: %s\n",
414 "%d incoming changes - new heads: %s\n",
420 len(added),
415 len(added),
421 ', '.join([hex(c[:6]) for c in newheads]))
416 ', '.join([hex(c[:6]) for c in newheads]))
422
417
423 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
418 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
424 lambda tr: repo._afterlock(runhooks))
419 lambda tr: repo._afterlock(runhooks))
425 finally:
420 finally:
426 repo.ui.flush()
421 repo.ui.flush()
427 # never return 0 here:
422 # never return 0 here:
428 if deltaheads < 0:
423 if deltaheads < 0:
429 ret = deltaheads - 1
424 ret = deltaheads - 1
430 else:
425 else:
431 ret = deltaheads + 1
426 ret = deltaheads + 1
432 return ret
427 return ret
433
428
434 def deltaiter(self):
429 def deltaiter(self):
435 """
430 """
436 returns an iterator of the deltas in this changegroup
431 returns an iterator of the deltas in this changegroup
437
432
438 Useful for passing to the underlying storage system to be stored.
433 Useful for passing to the underlying storage system to be stored.
439 """
434 """
440 chain = None
435 chain = None
441 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
436 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
442 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
437 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
443 yield chunkdata
438 yield chunkdata
444 chain = chunkdata[0]
439 chain = chunkdata[0]
445
440
446 class cg2unpacker(cg1unpacker):
441 class cg2unpacker(cg1unpacker):
447 """Unpacker for cg2 streams.
442 """Unpacker for cg2 streams.
448
443
449 cg2 streams add support for generaldelta, so the delta header
444 cg2 streams add support for generaldelta, so the delta header
450 format is slightly different. All other features about the data
445 format is slightly different. All other features about the data
451 remain the same.
446 remain the same.
452 """
447 """
453 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
454 deltaheadersize = struct.calcsize(deltaheader)
449 deltaheadersize = struct.calcsize(deltaheader)
455 version = '02'
450 version = '02'
456
451
457 def _deltaheader(self, headertuple, prevnode):
452 def _deltaheader(self, headertuple, prevnode):
458 node, p1, p2, deltabase, cs = headertuple
453 node, p1, p2, deltabase, cs = headertuple
459 flags = 0
454 flags = 0
460 return node, p1, p2, deltabase, cs, flags
455 return node, p1, p2, deltabase, cs, flags
461
456
462 class cg3unpacker(cg2unpacker):
457 class cg3unpacker(cg2unpacker):
463 """Unpacker for cg3 streams.
458 """Unpacker for cg3 streams.
464
459
465 cg3 streams add support for exchanging treemanifests and revlog
460 cg3 streams add support for exchanging treemanifests and revlog
466 flags. It adds the revlog flags to the delta header and an empty chunk
461 flags. It adds the revlog flags to the delta header and an empty chunk
467 separating manifests and files.
462 separating manifests and files.
468 """
463 """
469 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
470 deltaheadersize = struct.calcsize(deltaheader)
465 deltaheadersize = struct.calcsize(deltaheader)
471 version = '03'
466 version = '03'
472 _grouplistcount = 2 # One list of manifests and one list of files
467 _grouplistcount = 2 # One list of manifests and one list of files
473
468
474 def _deltaheader(self, headertuple, prevnode):
469 def _deltaheader(self, headertuple, prevnode):
475 node, p1, p2, deltabase, cs, flags = headertuple
470 node, p1, p2, deltabase, cs, flags = headertuple
476 return node, p1, p2, deltabase, cs, flags
471 return node, p1, p2, deltabase, cs, flags
477
472
478 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
473 def _unpackmanifests(self, repo, revmap, trp, prog):
479 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
480 numchanges)
481 for chunkdata in iter(self.filelogheader, {}):
475 for chunkdata in iter(self.filelogheader, {}):
482 # If we get here, there are directory manifests in the changegroup
476 # If we get here, there are directory manifests in the changegroup
483 d = chunkdata["filename"]
477 d = chunkdata["filename"]
484 repo.ui.debug("adding %s revisions\n" % d)
478 repo.ui.debug("adding %s revisions\n" % d)
485 dirlog = repo.manifestlog._revlog.dirlog(d)
479 dirlog = repo.manifestlog._revlog.dirlog(d)
486 deltas = self.deltaiter()
480 deltas = self.deltaiter()
487 if not dirlog.addgroup(deltas, revmap, trp):
481 if not dirlog.addgroup(deltas, revmap, trp):
488 raise error.Abort(_("received dir revlog group is empty"))
482 raise error.Abort(_("received dir revlog group is empty"))
489
483
490 class headerlessfixup(object):
484 class headerlessfixup(object):
491 def __init__(self, fh, h):
485 def __init__(self, fh, h):
492 self._h = h
486 self._h = h
493 self._fh = fh
487 self._fh = fh
494 def read(self, n):
488 def read(self, n):
495 if self._h:
489 if self._h:
496 d, self._h = self._h[:n], self._h[n:]
490 d, self._h = self._h[:n], self._h[n:]
497 if len(d) < n:
491 if len(d) < n:
498 d += readexactly(self._fh, n - len(d))
492 d += readexactly(self._fh, n - len(d))
499 return d
493 return d
500 return readexactly(self._fh, n)
494 return readexactly(self._fh, n)
501
495
class cg1packer(object):
    """Packer for version '01' changegroups (flat manifests, no flags)."""

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = stringutil.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        # Size notes are only worth emitting in verbose (non-debug) mode.
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # Terminator chunk for a group.
        return closechunk()

    def fileheader(self, fname):
        # Chunk announcing which file (or directory) the next group is for.
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg1 has no explicit manifest terminator.
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes,
                                            source):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based in pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                  1) fclnodes gets populated with the list of relevant
                     file nodes if we're not using fastpathlinkrev
                  2) When treemanifests are in use, collects treemanifest nodes
                     to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        if not revlog.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
class cg2packer(cg1packer):
    """Packer for version '02' changegroups (explicit delta bases)."""

    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp
        if base != nullrev and not revlog.candelta(base, rev):
            base = nullrev
        return base

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
class cg3packer(cg2packer):
    """Packer for version '03' changegroups (revlog flags, tree manifests)."""

    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # Unlike cg1, directory manifests are supported: announce the
        # directory name before its group when packing a subdirectory.
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an explicit close chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
# Map of changegroup version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }
def allsupportedversions(repo):
    """Return the set of changegroup versions this code knows about,
    filtered by what the given repo is configured to allow."""
    versions = set(_packermap)
    # '03' is only offered when changegroup3/treemanifest is explicitly
    # enabled, or the repo already requires tree manifests.
    wantscg3 = (repo.ui.configbool('experimental', 'changegroup3')
                or repo.ui.configbool('experimental', 'treemanifest')
                or 'treemanifest' in repo.requirements)
    if not wantscg3:
        versions.discard('03')
    return versions
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    # Currently identical to the full supported set.
    return allsupportedversions(repo)
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions -= {'01', '02'}
    if NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions -= {'01', '02'}
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions -= {'01', '02'}

    return versions
def localversion(repo):
    """Return the best version for locally-consumed bundles.

    Used for bundles that never leave this host, such as those produced by
    strip and shelve, and temporary bundles.
    """
    return max(supportedoutgoingversions(repo))
def safeversion(repo):
    """Return the smallest version that clients of the repo can be assumed
    to support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so '01' is dropped for generaldelta repos.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class registered for ``version``."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
def getunbundler(version, fh, alg, extras=None):
    """Return a changegroup unpacker reading from file-like ``fh``."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
946
940
def _changegroupinfo(repo, nodes, source):
    """Report the size of the outgoing changeset list on the ui.

    In verbose mode (or when bundling) the changeset count is printed;
    with debugging enabled every node hash is listed as well.
    """
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
954
948
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup for ``outgoing`` and wrap it in an unbundler.

    The stream is buffered through util.chunkbuffer so the result can be
    consumed like a bundle read from disk.
    """
    stream = makestream(repo, outgoing, version, source,
                        fastpath=fastpath, bundlecaps=bundlecaps)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(stream), None, extras)
961
955
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None):
    """Generate a changegroup data stream for the ``outgoing`` set."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # Take the linkrev fast path when explicitly asked to, or when every
    # unfiltered head was requested — in that case the client is known to
    # pull all linkrevs anyway.
    heads.sort()
    allheads = repo.filtername is None and heads == sorted(repo.heads())
    fastpathlinkrev = fastpath or allheads

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
980
974
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the filelog portion of a changegroup.

    Reads filelog chunks from ``source`` until the empty terminator
    header, adds each group to the matching filelog, and checks off the
    nodes listed in ``needfiles`` as they arrive.  Any node still listed
    afterwards must already exist locally, otherwise the transfer is
    aborted.

    Returns a ``(revisions, files)`` pair of counters.  Raises
    error.Abort on an empty revlog group, a censored delta base, a
    spurious entry, or missing file data.
    """
    revisions = 0
    files = 0
    # source.filelogheader() returns {} once every filelog is consumed.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        flog = repo.file(fname)
        oldlen = len(flog)
        try:
            deltas = source.deltaiter()
            if not flog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(flog) - oldlen
        if fname in needfiles:
            needs = needfiles[fname]
            # Every newly added node must have been expected.
            for rev in xrange(oldlen, len(flog)):
                node = flog.node(rev)
                if node in needs:
                    needs.remove(node)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Nodes the changegroup did not deliver must already be present.
    for fname, needs in needfiles.iteritems():
        flog = repo.file(fname)
        for node in needs:
            try:
                flog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now