# remotefilelog: add 'changelog' arg to shallowcg1packer.generate (issue6269)
# Pulkit Goyal - r44895:6d121acb 5.3.1 stable
# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
    bundlerepo,
    changegroup,
    error,
    match,
    mdiff,
    pycompat,
)
from . import (
    constants,
    remotefilelog,
    shallowutil,
)

NoFiles = 0
LocalFiles = 1
AllFiles = 2


def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    """Yield changegroup chunks for ``nodelist`` of ``rlog``.

    Non-remotefilelog revlogs are delegated to the stock changegroup
    ``group()`` implementation. For remotefilelog filelogs the nodes are
    emitted in topological order so each delta can be based on the
    previously emitted node.
    """
    # Plain revlogs take the unmodified upstream path.
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for chunk in super(cls, self).group(nodelist, rlog, lookup, units=units):
            yield chunk
        return

    if not nodelist:
        yield self.close()
        return

    # Topologically sort so parents precede children.
    ordered = shallowutil.sortnodes(nodelist, rlog.parents)

    # Prepend p1 of the first node so the first delta has a base.
    ordered.insert(0, rlog.parents(ordered[0])[0])

    # Walk consecutive (base, target) pairs and emit a delta for each.
    for prev, curr in zip(ordered, ordered[1:]):
        linknode = lookup(curr)
        for chunk in self.nodechunk(rlog, curr, prev, linknode):
            yield chunk

    yield self.close()
54 54
55 55
class shallowcg1packer(changegroup.cgpacker):
    """Changegroup packer that understands shallow (remotefilelog) repos.

    Overrides the parts of ``changegroup.cgpacker`` that assume complete
    local filelogs: the linkrev fast path is disabled, file groups are
    built through :func:`shallowgroup`, and file payloads are filtered or
    suppressed depending on who we are talking to.
    """

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
        """Generate the changegroup stream.

        ``**kwargs`` forwards newer upstream arguments (e.g. the
        ``changelog`` flag added for issue6269) without breaking when
        running against older Mercurial.
        """
        if shallowutil.isenabled(self._repo):
            # Filelog linkrevs are not reliable in a shallow repo, so the
            # linkrev fast path must not be used.
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(
            commonrevs, clnodes, fastpathlinkrev, source, **kwargs
        )

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        """Route group construction through the shallow-aware helper."""
        return shallowgroup(
            shallowcg1packer, self, nodelist, rlog, lookup, units=units
        )

    def generatefiles(self, changedfiles, *args):
        """Emit file groups, honoring shallow-repo restrictions.

        Unpacks ``args`` for both the old 3-tuple and the newer 6-tuple
        upstream signatures of ``cgpacker.generatefiles``.
        """
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, b'foo')
                if repo._cgfilespos:
                    raise error.Abort(
                        b"cannot pull from full bundles",
                        hint=b"use `hg unbundle` instead",
                    )
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                # Keep only files outside the shallow match, which must
                # still travel as full revlogs.
                changedfiles = list(
                    [f for f in changedfiles if not repo.shallowmatch(f)]
                )

        return super(shallowcg1packer, self).generatefiles(changedfiles, *args)

    def shouldaddfilegroups(self, source):
        """Return AllFiles/LocalFiles/NoFiles for this transfer ``source``."""
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == b"push" or source == b"bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == b"serve" or source == b"pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                # Shallow-aware peer: it can fetch file data on demand.
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_(b"pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        """Drop nodes whose linkrev is already common with the receiver."""
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(
                rlog, missing, commonrevs
            )

        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        """Yield the chunk header, delta header and delta for one node."""
        prefix = b''
        if prevnode == nullid:
            # No base: emit full text with a trivial diff header.
            delta = revlog.rawdata(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta
146 146
147 147
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    """Wrap changegroup creation to narrow ``repo.shallowmatch`` while serving.

    When serving a shallow client, the client's include/exclude patterns
    (carried in the bundle capabilities) temporarily replace the repo's
    shallow matcher so only the files the client tracks are sent. The
    original matcher is always restored afterwards.
    """
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    saved = repo.shallowmatch
    try:
        # if serving, only send files the clients has patterns for
        if source == b'serve':
            includepattern = None
            excludepattern = None
            for cap in kwargs.get('bundlecaps') or []:
                if cap.startswith(b"includepattern="):
                    value = cap[len(b"includepattern=") :]
                    if value:
                        includepattern = value.split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    value = cap[len(b"excludepattern=") :]
                    if value:
                        excludepattern = value.split(b'\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            else:
                # No patterns advertised: match everything.
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = saved
177 177
178 178
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    """Apply incoming changegroup file revisions to a shallow repo.

    Reads every file chunk from ``source`` first, then replays them in a
    topological order so that a revision is only written once its delta
    base, parents, and copy source are present. Returns a tuple of
    (number of revisions received, number of new files).
    """
    # Non-shallow repos use the stock implementation unchanged.
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()          # filenames already counted toward newfiles
    revisiondatas = {}       # (filename, node) -> raw delta tuple
    queue = []               # (filename, node) pending application

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            # File is outside the shallow pattern: store it in a normal
            # revlog immediately instead of buffering.
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        # chain stays None only if the group contained no chunks at all.
        if chain is None:
            raise error.Abort(_(b"received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        # True when dependency (depf, depnode) is satisfied; otherwise
        # re-queues both the dependency and (f, node) and returns False.
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            # Skip null parents and anything arriving in this changegroup.
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once it's deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        # skipcount guards against a cycle: if every remaining entry has
        # been re-queued without progress, abort instead of spinning.
        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_(b"circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        # Reconstruct the full text by patching the delta onto its base.
        base = fl.rawdata(deltabase)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if b'copy' in meta:
            # Renames/copies depend on the copy source's revision too.
            copyfrom = meta[b'copy']
            copynode = bin(meta[b'copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    # NOTE(review): this `continue` only advances the inner
                    # `for p in [p1, p2]` loop, not the outer `while queue`
                    # loop, so the revision is still added below even though
                    # available() just re-queued it -- confirm this is
                    # intended behavior.
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
# (end of file)