##// END OF EJS Templates
# rawdata: update callers in shallowbundle
# marmoute - r43050:1d9031b6 default
1 1 # shallowbundle.py - bundle10 implementation for use with shallow repositories
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 from mercurial.i18n import _
10 10 from mercurial.node import bin, hex, nullid
11 11 from mercurial import (
12 12 bundlerepo,
13 13 changegroup,
14 14 error,
15 15 match,
16 16 mdiff,
17 17 pycompat,
18 18 )
19 19 from . import (
20 20 constants,
21 21 remotefilelog,
22 22 shallowutil,
23 23 )
24 24
# File-group modes returned by shallowcg1packer.shouldaddfilegroups(),
# controlling which filelog groups go into a changegroup:
NoFiles = 0     # omit shallow-matched files from the changegroup entirely
LocalFiles = 1  # peer advertises the shallow bundle2 capability (serve/pull)
AllFiles = 2    # send every file revision (push/bundle, or full-repo peer)
28 28
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    """Yield changegroup chunks for ``nodelist`` drawn from ``rlog``.

    Non-remotefilelog revlogs are delegated wholesale to the superclass
    implementation of ``group``.  For remotefilelog stores the nodes are
    topologically sorted, the first node's parent is prepended as the
    initial delta base, and each consecutive pair is emitted as a delta
    chunk via ``self.nodechunk``.
    """
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for chunk in super(cls, self).group(nodelist, rlog, lookup,
                                            units=units):
            yield chunk
        return

    if not nodelist:
        yield self.close()
        return

    nodes = shallowutil.sortnodes(nodelist, rlog.parents)

    # Prepend the parent of the first revision so the first delta has a
    # base to diff against.
    nodes.insert(0, rlog.parents(nodes[0])[0])

    # Emit a delta chunk per consecutive (base, node) pair.
    for base, node in zip(nodes, nodes[1:]):
        linknode = lookup(node)
        for chunk in self.nodechunk(rlog, node, base, linknode):
            yield chunk

    yield self.close()
54 54
class shallowcg1packer(changegroup.cgpacker):
    """Changegroup packer aware of shallow (remotefilelog) repositories.

    Overrides the stock cgpacker so that file data for shallow-tracked
    files is generated node-by-node from remotefilelog stores, and so
    that which file groups are sent depends on the peer (see
    shouldaddfilegroups).
    """

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        if shallowutil.isenabled(self._repo):
            # Disable the linkrev fast path on shallow repos; they cannot
            # rely on local filelog linkrevs.
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(commonrevs, clnodes,
                                                      fastpathlinkrev, source)

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        # Delegate to the shared shallow-aware grouping helper.
        return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
                            units=units)

    def generatefiles(self, changedfiles, *args):
        # The base-class signature changed across Mercurial versions;
        # accept either argument shape.
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, 'foo')
                if repo._cgfilespos:
                    raise error.Abort("cannot pull from full bundles",
                                      hint="use `hg unbundle` instead")
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                # Drop shallow-matched files from the set to be sent.
                changedfiles = list([f for f in changedfiles
                                     if not repo.shallowmatch(f)])

        return super(shallowcg1packer, self).generatefiles(
            changedfiles, *args)

    def shouldaddfilegroups(self, source):
        """Return NoFiles, LocalFiles or AllFiles for this operation."""
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == "push" or source == "bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == "serve" or source == "pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_("pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(rlog, missing,
                                                       commonrevs)

        # For remotefilelog stores, keep only nodes whose linkrev is not
        # already common with the receiver.
        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        """Yield the chunk header, delta header and delta for one node."""
        prefix = ''
        if prevnode == nullid:
            # No delta base: send the full raw stored data.  (Diff residue
            # had left the deprecated revision(node, raw=True) call here
            # alongside the rawdata() replacement; only rawdata remains.)
            delta = revlog.rawdata(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta
140 140
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    """Wrap changegroup creation to honor a serving client's patterns.

    On a shallow-enabled repo, when serving, temporarily replace
    ``repo.shallowmatch`` with a matcher built from the include/exclude
    patterns advertised in the client's bundlecaps, then restore the
    previous matcher afterwards.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    savedmatch = repo.shallowmatch
    try:
        if source == 'serve':
            # if serving, only send files the client has patterns for
            caps = kwargs.get(r'bundlecaps') or []
            includepattern = None
            excludepattern = None
            for cap in caps:
                if cap.startswith("includepattern="):
                    value = cap[len("includepattern="):]
                    if value:
                        includepattern = value.split('\0')
                elif cap.startswith("excludepattern="):
                    value = cap[len("excludepattern="):]
                    if value:
                        excludepattern = value.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                                                includepattern,
                                                excludepattern)
            else:
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = savedmatch
169 169
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    """Apply incoming changegroup file revisions to a shallow repo.

    All file revision chunks are read from ``source`` first, then laid
    down in topological order so a revision is only written after its
    delta base (and copy source) is present — remotefilelog revisions
    may depend on revisions of a *different* file via renames/copies.

    Returns a ``(revision count, new file count)`` tuple.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            # Not shallow-tracked: store directly in a regular filelog.
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
        # True when (depf, depnode) is usable now; otherwise requeue
        # (f, node) behind its dependency and return False.
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once it's deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        # Fetch the raw delta base.  (Diff residue had left the deprecated
        # fl.revision(deltabase, raw=True) call here alongside the
        # rawdata() replacement; only rawdata remains.)
        base = fl.rawdata(deltabase)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    # NOTE(review): this 'continue' only advances the parent
                    # loop, not the outer while, so an unavailable parent does
                    # not actually defer this revision — confirm intended.
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
# General Comments 0
# You need to be logged in to leave comments. Login now