##// END OF EJS Templates
remotefilelog: use progress helper in shallowbundle...
Martin von Zweigbergk -
r40879:4e08bbfc default
parent child Browse files
Show More
@@ -1,294 +1,293 b''
1 1 # shallowbundle.py - bundle10 implementation for use with shallow repositories
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 from mercurial.i18n import _
10 10 from mercurial.node import bin, hex, nullid
11 11 from mercurial import (
12 12 bundlerepo,
13 13 changegroup,
14 14 error,
15 15 match,
16 16 mdiff,
17 17 pycompat,
18 18 )
19 19 from . import (
20 20 constants,
21 21 remotefilelog,
22 22 shallowutil,
23 23 )
24 24
# Return values for shallowcg1packer.shouldaddfilegroups(): how much file
# data to include in an outgoing changegroup.
NoFiles = 0     # send no file data (receiver fetches files lazily)
LocalFiles = 1  # send only the locally-created file revisions
AllFiles = 2    # send every file revision (full changegroup)
28 28
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    """Yield changegroup chunks for ``nodelist`` drawn from ``rlog``.

    Shared implementation for the shallow packer classes.  A plain
    (non-remotefilelog) revlog is delegated to the parent class; a
    remotefilelog is emitted as one delta chunk per node, each node
    delta'd against its predecessor in topologically sorted order.
    """
    # Non-shallow file logs: the stock packer already does the right thing.
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for chunk in super(cls, self).group(nodelist, rlog, lookup,
                                            units=units):
            yield chunk
        return

    if not nodelist:
        yield self.close()
        return

    ordered = shallowutil.sortnodes(nodelist, rlog.parents)

    # Prepend the first node's parent so the first delta has a base.
    ordered.insert(0, rlog.parents(ordered[0])[0])

    # Emit one delta per consecutive (base, node) pair.
    for idx in pycompat.xrange(len(ordered) - 1):
        base, node = ordered[idx], ordered[idx + 1]
        linknode = lookup(node)
        for chunk in self.nodechunk(rlog, node, base, linknode):
            yield chunk

    yield self.close()
54 54
class shallowcg1packer(changegroup.cgpacker):
    """Changegroup packer that understands shallow (remotefilelog) repos."""

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        # Filelog linkrevs cannot be trusted in a shallow repo, so the
        # linkrev fast path must be disabled there.
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(commonrevs, clnodes,
                                                      fastpathlinkrev, source)

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        # Delegate to the shared shallow-aware group implementation.
        return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
                            units=units)

    def generatefiles(self, changedfiles, *args):
        # The upstream signature changed across Mercurial releases; accept
        # both the old 3-argument and the newer 6-argument form.
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, 'foo')
                if repo._cgfilespos:
                    raise error.Abort("cannot pull from full bundles",
                                      hint="use `hg unbundle` instead")
                return []
            if self.shouldaddfilegroups(source) == NoFiles:
                # Strip out shallow-tracked files entirely.
                changedfiles = list([f for f in changedfiles
                                     if not repo.shallowmatch(f)])

        return super(shallowcg1packer, self).generatefiles(
            changedfiles, *args)

    def shouldaddfilegroups(self, source):
        """Return NoFiles/LocalFiles/AllFiles for this transfer ``source``."""
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source in ("push", "bundle"):
            return AllFiles

        if source in ("serve", "pull"):
            caps = self._bundlecaps or []
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            # Serving to a full repo requires us to serve everything
            repo.ui.warn(_("pulling from a shallow repo\n"))
            return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(rlog, missing,
                                                       commonrevs)

        # Keep only the nodes whose introducing changeset is not already
        # common to both sides.
        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        """Yield the raw changegroup chunks (header, meta, delta) for ``node``."""
        if prevnode == nullid:
            # No base to delta against: ship the full text with a
            # header describing a trivial (full-replacement) diff.
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
            prefix = ''
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        yield changegroup.chunkheader(len(meta) + len(delta))
        yield meta
        yield delta
140 140
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    """Wrapped ``changegroup.makechangegroup`` for shallow repositories.

    When serving, temporarily narrow ``repo.shallowmatch`` to the
    include/exclude patterns the client advertised in its bundle
    capabilities, then restore the original matcher afterwards.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    saved = repo.shallowmatch
    try:
        # if serving, only send files the clients has patterns for
        if source == 'serve':
            includepattern = None
            excludepattern = None
            for cap in (kwargs.get(r'bundlecaps') or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                                                includepattern,
                                                excludepattern)
            else:
                repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = saved
169 169
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    """Shallow-aware replacement for applying a changegroup's file data.

    Reads every file chunk from ``source`` first, then writes the
    revisions back in topological order so that a revision is only
    added after its delta base and any rename/copy source are present.
    Returns ``(number of revisions received, number of new files)``.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            # File is outside the shallow pattern: apply it through the
            # normal (revlog-backed) filelog path immediately.
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
        # Return True if (depf, depnode) is already usable; otherwise
        # push the dependency (and the requester) back on the queue.
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once it's deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        # Guard against an unresolvable dependency cycle: if we have
        # deferred more times than there are queue entries, nothing is
        # making progress.
        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase, raw=True)
        text = mdiff.patch(base, delta)
        # Python 2: mdiff.patch may return a buffer; normalize to str.
        if isinstance(text, buffer):
            text = str(text)

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                # NOTE(review): this `continue` only advances the inner
                # `for` loop — unlike the deltabase/copy checks above it
                # does not defer fl.add() for this revision, although
                # available() has still re-queued the dependency.
                # Confirm whether skipping to the next queue entry was
                # intended here.
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        # Progress was made; reset the cycle-detection counter.
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
General Comments 0
You need to be logged in to leave comments. Login now