remotefilelog: check against bytes type instead of buffer and coerce to bytes...
Augie Fackler
r41292:ebda5d42 default
@@ -1,293 +1,293 @@
# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
    bundlerepo,
    changegroup,
    error,
    match,
    mdiff,
    pycompat,
)
from . import (
    constants,
    remotefilelog,
    shallowutil,
)

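# How much file data to send alongside a changegroup; consumed by
# shouldaddfilegroups() below. NoFiles strips shallow-matched files (a
# shallow peer refetches them on demand), AllFiles sends complete file
# data, and LocalFiles is the middle ground used for bundle2-capable
# shallow peers.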
NoFiles = 0
LocalFiles = 1
AllFiles = 2

def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for c in super(cls, self).group(nodelist, rlog, lookup,
                                        units=units):
            yield c
        return

    if len(nodelist) == 0:
        yield self.close()
        return

    nodelist = shallowutil.sortnodes(nodelist, rlog.parents)

    # add the parent of the first rev
    p = rlog.parents(nodelist[0])[0]
    nodelist.insert(0, p)
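    # the delta loop below deltas each node against its predecessor in the
    # sorted list, so prepending the first node's parent guarantees every
    # chunk has a delta base the receiver already knows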

    # build deltas
    for i in pycompat.xrange(len(nodelist) - 1):
        prev, curr = nodelist[i], nodelist[i + 1]
        linknode = lookup(curr)
        for c in self.nodechunk(rlog, curr, prev, linknode):
            yield c

    yield self.close()

class shallowcg1packer(changegroup.cgpacker):
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(commonrevs, clnodes,
                                                      fastpathlinkrev, source)

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
                            units=units)

    def generatefiles(self, changedfiles, *args):
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it,
                # since bundlerepo is heavily tied to revlogs. Require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, 'foo')
                if repo._cgfilespos:
                    raise error.Abort("cannot pull from full bundles",
                                      hint="use `hg unbundle` instead")
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = list([f for f in changedfiles
                                     if not repo.shallowmatch(f)])

        return super(shallowcg1packer, self).generatefiles(
            changedfiles, *args)

    def shouldaddfilegroups(self, source):
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == "push" or source == "bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == "serve" or source == "pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_("pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(rlog, missing,
                                                       commonrevs)

        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        prefix = ''
        if prevnode == nullid:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
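        # a changegroup chunk on the wire is a big-endian length header
        # followed by the delta header and delta body; chunkheader() includes
        # its own four bytes in the advertised length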
        yield changegroup.chunkheader(l)
        yield meta
        yield delta

def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    original = repo.shallowmatch
    try:
        # if serving, only send files the client has patterns for
        if source == 'serve':
            bundlecaps = kwargs.get(r'bundlecaps')
            includepattern = None
            excludepattern = None
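            # shallow clients advertise their include/exclude patterns as
            # NUL-separated lists in the bundle capabilities; rebuild the
            # matcher so we only send file data the client actually tracks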
            for cap in (bundlecaps or []):
                if cap.startswith("includepattern="):
                    raw = cap[len("includepattern="):]
                    if raw:
                        includepattern = raw.split('\0')
                elif cap.startswith("excludepattern="):
                    raw = cap[len("excludepattern="):]
                    if raw:
                        excludepattern = raw.split('\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(repo.root, '', None,
                    includepattern, excludepattern)
            else:
                repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original

def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
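        # True when (depf, depnode) is already usable as a dependency: either
        # laid down earlier in this loop, or absent from the changegroup and
        # therefore assumed to be in the repo. Otherwise requeue this node
        # with its dependency in front of it, and return False.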
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
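        # p1, p2, and the delta base are the only nodes a revision can depend
        # on; any of them not shipped in this changegroup must be fetched
        # from the server before its delta can be applied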

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

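        # skipcount counts consecutive dequeues that make no progress; once
        # it exceeds the queue length, every remaining entry has been
        # requeued without being written, i.e. a dependency cycle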
        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase, raw=True)
        text = mdiff.patch(base, delta)
-        if isinstance(text, buffer):
-            text = str(text)
+        if not isinstance(text, bytes):
+            text = bytes(text)
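        # the change in this revision: on Python 2 mdiff.patch() can return
        # a buffer rather than a byte string, so coerce to bytes for
        # parsemeta() and fl.add() below; checking bytes (not buffer) also
        # works on Python 3, where the buffer builtin does not exist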

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
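
The substantive change in this revision is the two-line coercion marked with
-/+ above. A minimal standalone sketch of what it guards against (the
memoryview is a hypothetical stand-in for the buffer-like object that
mdiff.patch() can return; the payload value is made up for illustration):

    # Runnable on Python 3: coerce a buffer-like patch result to bytes.
    text = memoryview(b'contents of the patched file revision')  # stand-in
    if not isinstance(text, bytes):
        text = bytes(text)  # bytes(memoryview) copies the data out
    assert isinstance(text, bytes)

On Python 2, bytes is an alias for str, so the new check subsumes the old
str(text) coercion; on Python 3 the buffer builtin no longer exists, so the
old isinstance(text, buffer) check would have raised a NameError.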