##// END OF EJS Templates
index: use `index.get_rev` in `remotefilelog`...
marmoute -
r43970:bda86cfe default
parent child Browse files
Show More
@@ -1,528 +1,528 b''
1 # remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
1 # remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import collections
9 import collections
10 import time
10 import time
11
11
12 from mercurial.node import bin, hex, nullid, nullrev
12 from mercurial.node import bin, hex, nullid, nullrev
13 from mercurial import (
13 from mercurial import (
14 ancestor,
14 ancestor,
15 context,
15 context,
16 error,
16 error,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from . import shallowutil
20 from . import shallowutil
21
21
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 FASTLOG_TIMEOUT_IN_SECS = 0.5
23 FASTLOG_TIMEOUT_IN_SECS = 0.5
24
24
25
25
class remotefilectx(context.filectx):
    """A filectx for shallow (remotefilelog) repositories.

    Overrides the parts of ``context.filectx`` that assume a local filelog
    with trustworthy linkrevs.  In a shallow clone the linknode stored in a
    remotefilelog blob may point at a changeset that is not an ancestor of
    the revision being inspected ("linkrev-shadowing"), so several methods
    here re-derive the correct linknode/linkrev, possibly by walking
    ancestors or prefetching fresh data from the server.
    """

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
        ancestormap=None,
    ):
        # Normalize the fileid: nullrev means "no file node", and a
        # 40-character value is a hex node that must be converted to binary.
        if fileid == nullrev:
            fileid = nullid
        if fileid and len(fileid) == 40:
            fileid = bin(fileid)
        super(remotefilectx, self).__init__(
            repo, path, changeid, fileid, filelog, changectx
        )
        # Optional pre-computed ancestor map (fnode -> (p1, p2, linknode,
        # copyfrom)); lazily populated by ancestormap() when not supplied.
        self._ancestormap = ancestormap

    def size(self):
        """Return the size of this file revision as reported by the filelog."""
        return self._filelog.size(self._filenode)

    @propertycache
    def _changeid(self):
        # Resolve the changeset id lazily, preferring already-known context.
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            linknode = self._adjustlinknode(
                self._path, self._filelog, self._filenode, self._descendantrev
            )
            return self._repo.unfiltered().changelog.rev(linknode)
        else:
            return self.linkrev()

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return remotefilectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def linkrev(self):
        return self._linkrev

    @propertycache
    def _linkrev(self):
        # The null file node belongs to no changeset.
        if self._filenode == nullid:
            return nullrev

        # Fast path: the linknode recorded in the ancestor map is usually
        # present in the local changelog index.
        ancestormap = self.ancestormap()
        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        rev = self._repo.changelog.index.get_rev(linknode)
        if rev is not None:
            return rev

        # Search all commits for the appropriate linkrev (slow, but uncommon)
        path = self._path
        fileid = self._filenode
        cl = self._repo.unfiltered().changelog
        mfl = self._repo.manifestlog

        for rev in range(len(cl) - 1, 0, -1):
            node = cl.node(rev)
            data = cl.read(
                node
            )  # get changeset data (we avoid object creation)
            if path in data[3]:  # checking the 'files' field.
                # The file has been touched, check if the hash is what we're
                # looking for.
                if fileid == mfl[data[0]].readfast().get(path):
                    return rev

        # Couldn't find the linkrev. This should generally not happen, and will
        # likely cause a crash.
        return None

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # No originating context to validate against, or the linkrev
            # already matches: trust it.
            return lkr
        linknode = self._adjustlinknode(
            self._path,
            self._filelog,
            self._filenode,
            self.rev(),
            inclusive=True,
        )
        return self._repo.changelog.rev(linknode)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        if not copyfrom:
            return None

        renamed = (copyfrom, p1)
        if self.rev() == self.linkrev():
            return renamed

        # Only report the rename if neither parent already has this exact
        # file node (otherwise the rename belongs to an earlier changeset).
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # File absent in this parent; keep checking the other one.
                pass
        return renamed

    def copysource(self):
        """Return the copy-source path, or a falsy value when not a copy."""
        copy = self.renamed()
        return copy and copy[0]

    def ancestormap(self):
        """Return (and cache) the map fnode -> (p1, p2, linknode, copyfrom)."""
        if not self._ancestormap:
            self._ancestormap = self.filelog().ancestormap(self._filenode)

        return self._ancestormap

    def parents(self):
        """Return the parent remotefilectx objects of this file revision."""
        repo = self._repo
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        results = []
        if p1 != nullid:
            # For a copy, p1 lives in the copy-source's filelog.
            path = copyfrom or self._path
            flog = repo.file(path)
            p1ctx = remotefilectx(
                repo, path, fileid=p1, filelog=flog, ancestormap=ancestormap
            )
            # Record a known descendant so the parent can later fix up its
            # own linkrev lazily (see _changeid).
            p1ctx._descendantrev = self.rev()
            results.append(p1ctx)

        if p2 != nullid:
            path = self._path
            flog = repo.file(path)
            p2ctx = remotefilectx(
                repo, path, fileid=p2, filelog=flog, ancestormap=ancestormap
            )
            p2ctx._descendantrev = self.rev()
            results.append(p2ctx)

        return results

    def _nodefromancrev(self, ancrev, cl, mfl, path, fnode):
        """returns the node for <path> in <ancrev> if content matches <fnode>"""
        ancctx = cl.read(ancrev)  # This avoids object creation.
        manifestnode, files = ancctx[0], ancctx[3]
        # If the file was touched in this ancestor, and the content is similar
        # to the one we are searching for.
        if path in files and fnode == mfl[manifestnode].readfast().get(path):
            return cl.node(ancrev)
        return None

    def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked

        Note: This is based on adjustlinkrev in core, but it's quite different.

        adjustlinkrev depends on the fact that the linkrev is the bottom most
        node, and uses that as a stopping point for the ancestor traversal. We
        can't do that here because the linknode is not guaranteed to be the
        bottom most one.

        In our code here, we actually know what a bunch of potential ancestor
        linknodes are, so instead of stopping the cheap-ancestor-traversal when
        we get to a linkrev, we stop when we see any of the known linknodes.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        ancestormap = self.ancestormap()
        linknode = ancestormap[fnode][2]

        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]

        # Cheap check first: the stored linknode may already be valid.
        if self._verifylinknode(revs, linknode):
            return linknode

        commonlogkwargs = {
            'revs': b' '.join([hex(cl.node(rev)) for rev in revs]),
            'fnode': hex(fnode),
            'filepath': path,
            'user': shallowutil.getusername(repo.ui),
            'reponame': shallowutil.getreponame(repo.ui),
        }

        repo.ui.log(b'linkrevfixup', b'adjusting linknode\n', **commonlogkwargs)

        pc = repo._phasecache
        seenpublic = False
        iteranc = cl.ancestors(revs, inclusive=inclusive)
        for ancrev in iteranc:
            # First, check locally-available history.
            lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode)
            if lnode is not None:
                return lnode

            # adjusting linknode can be super-slow. To mitigate the issue
            # we use two heuristics: calling fastlog and forcing remotefilelog
            # prefetch
            if not seenpublic and pc.phase(repo, ancrev) == phases.public:
                # TODO: there used to be a codepath to fetch linknodes
                # from a server as a fast path, but it appeared to
                # depend on an API FB added to their phabricator.
                lnode = self._forceprefetch(
                    repo, path, fnode, revs, commonlogkwargs
                )
                if lnode:
                    return lnode
                seenpublic = True

        # Fall back to the (possibly shadowed) stored linknode.
        return linknode

    def _forceprefetch(self, repo, path, fnode, revs, commonlogkwargs):
        # This next part is super non-obvious, so big comment block time!
        #
        # It is possible to get extremely bad performance here when a fairly
        # common set of circumstances occur when this extension is combined
        # with a server-side commit rewriting extension like pushrebase.
        #
        # First, an engineer creates Commit A and pushes it to the server.
        # While the server's data structure will have the correct linkrev
        # for the files touched in Commit A, the client will have the
        # linkrev of the local commit, which is "invalid" because it's not
        # an ancestor of the main line of development.
        #
        # The client will never download the remotefilelog with the correct
        # linkrev as long as nobody else touches that file, since the file
        # data and history hasn't changed since Commit A.
        #
        # After a long time (or a short time in a heavily used repo), if the
        # same engineer returns to change the same file, some commands --
        # such as amends of commits with file moves, logs, diffs, etc --
        # can trigger this _adjustlinknode code. In those cases, finding
        # the correct rev can become quite expensive, as the correct
        # revision is far back in history and we need to walk back through
        # history to find it.
        #
        # In order to improve this situation, we force a prefetch of the
        # remotefilelog data blob for the file we were called on. We do this
        # at most once, when we first see a public commit in the history we
        # are traversing.
        #
        # Forcing the prefetch means we will download the remote blob even
        # if we have the "correct" blob in the local store. Since the union
        # store checks the remote store first, this means we are much more
        # likely to get the correct linkrev at this point.
        #
        # In rare circumstances (such as the server having a suboptimal
        # linkrev for our use case), we will fall back to the old slow path.
        #
        # We may want to add additional heuristics here in the future if
        # the slow path is used too much. One promising possibility is using
        # obsolescence markers to find a more-likely-correct linkrev.

        logmsg = b''
        start = time.time()
        try:
            repo.fileservice.prefetch([(path, hex(fnode))], force=True)

            # Now that we've downloaded a new blob from the server,
            # we need to rebuild the ancestor map to recompute the
            # linknodes.
            self._ancestormap = None
            linknode = self.ancestormap()[fnode][2]  # 2 is linknode
            if self._verifylinknode(revs, linknode):
                logmsg = b'remotefilelog prefetching succeeded'
                return linknode
            logmsg = b'remotefilelog prefetching not found'
            return None
        except Exception as e:
            # Best-effort: a failed prefetch must not abort the caller's
            # linkrev search; the slow path remains available.
            logmsg = b'remotefilelog prefetching failed (%s)' % e
            return None
        finally:
            elapsed = time.time() - start
            repo.ui.log(
                b'linkrevfixup',
                logmsg + b'\n',
                elapsed=elapsed * 1000,
                **commonlogkwargs
            )

    def _verifylinknode(self, revs, linknode):
        """
        Check if a linknode is correct one for the current history.

        That is, return True if the linkrev is the ancestor of any of the
        passed in revs, otherwise return False.

        `revs` is a list that usually has one element -- usually the wdir parent
        or the user-passed rev we're looking back from. It may contain two revs
        when there is a merge going on, or zero revs when a root node with no
        parents is being created.
        """
        if not revs:
            return False
        try:
            # Use the C fastpath to check if the given linknode is correct.
            cl = self._repo.unfiltered().changelog
            return any(cl.isancestor(linknode, cl.node(r)) for r in revs)
        except error.LookupError:
            # The linknode read from the blob may have been stripped or
            # otherwise not present in the repository anymore. Do not fail hard
            # in this case. Instead, return false and continue the search for
            # the correct linknode.
            return False

    def ancestors(self, followfirst=False):
        """Yield ancestor file contexts, most recent (highest linkrev) first.

        ``followfirst`` restricts the traversal to first parents only.
        """
        ancestors = []
        # Breadth-ish traversal over the file DAG, deduplicated by filenode.
        queue = collections.deque((self,))
        seen = set()
        while queue:
            current = queue.pop()
            if current.filenode() in seen:
                continue
            seen.add(current.filenode())

            ancestors.append(current)

            parents = current.parents()
            first = True
            for p in parents:
                if first or not followfirst:
                    queue.append(p)
                first = False

        # Remove self
        ancestors.pop(0)

        # Sort by linkrev
        # The copy tracing algorithm depends on these coming out in order
        ancestors = sorted(ancestors, reverse=True, key=lambda x: x.linkrev())

        for ancestor in ancestors:
            yield ancestor

    def ancestor(self, fc2, actx):
        """Return the common ancestor file context of self and ``fc2``
        within ancestor changectx ``actx``, or None if there is none."""
        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]

        # the next easiest cases: unambiguous predecessor (name trumps
        # history)
        if self.path() in actx and fc2.path() not in actx:
            return actx[self.path()]
        if fc2.path() in actx and self.path() not in actx:
            return actx[fc2.path()]

        # do a full traversal
        amap = self.ancestormap()
        bmap = fc2.ancestormap()

        def parents(x):
            # Map a (path, node) pair to its parent pairs, following copies
            # (entry layout: p1, p2, linknode, copyfrom).
            f, n = x
            p = amap.get(n) or bmap.get(n)
            if not p:
                return []

            return [(p[3] or f, p[0]), (f, p[1])]

        a = (self.path(), self.filenode())
        b = (fc2.path(), fc2.filenode())
        result = ancestor.genericancestor(a, b, parents)
        if result:
            f, n = result
            r = remotefilectx(self._repo, f, fileid=n, ancestormap=amap)
            return r

        return None

    def annotate(self, *args, **kwargs):
        """Annotate this file, prefetching needed blobs in one batch first."""
        introctx = self
        prefetchskip = kwargs.pop('prefetchskip', None)
        if prefetchskip:
            # use introrev so prefetchskip can be accurately tested
            introrev = self.introrev()
            if self.rev() != introrev:
                introctx = remotefilectx(
                    self._repo,
                    self._path,
                    changeid=introrev,
                    fileid=self._filenode,
                    filelog=self._filelog,
                    ancestormap=self._ancestormap,
                )

        # like self.ancestors, but append to "fetch" and skip visiting parents
        # of nodes in "prefetchskip".
        fetch = []
        seen = set()
        queue = collections.deque((introctx,))
        seen.add(introctx.node())
        while queue:
            current = queue.pop()
            if current.filenode() != self.filenode():
                # this is a "joint point". fastannotate needs contents of
                # "joint point"s to calculate diffs for side branches.
                fetch.append((current.path(), hex(current.filenode())))
            if prefetchskip and current in prefetchskip:
                continue
            for parent in current.parents():
                if parent.node() not in seen:
                    seen.add(parent.node())
                    queue.append(parent)

        self._repo.ui.debug(
            b'remotefilelog: prefetching %d files '
            b'for annotate\n' % len(fetch)
        )
        if fetch:
            self._repo.fileservice.prefetch(fetch)
        return super(remotefilectx, self).annotate(*args, **kwargs)

    # Return empty set so that the hg serve and thg don't stack trace
    def children(self):
        return []
486
486
487
487
class remoteworkingfilectx(context.workingfilectx, remotefilectx):
    """A workingfilectx for shallow repositories.

    Combines ``context.workingfilectx`` (working-directory file access) with
    the remotefilelog ancestor-map machinery from ``remotefilectx`` so that
    copy tracing and parent resolution work in a shallow clone.
    """

    def __init__(self, repo, path, filelog=None, workingctx=None):
        # The working-directory file has no stored ancestor map; it is built
        # on demand from the working context's parents (see ancestormap).
        self._ancestormap = None
        super(remoteworkingfilectx, self).__init__(
            repo, path, filelog, workingctx
        )

    def parents(self):
        # Use remotefilectx's ancestormap-based parent resolution rather
        # than workingfilectx's.
        return remotefilectx.parents(self)

    def ancestormap(self):
        """Build (and cache) an ancestor map for the working-directory file.

        The working file itself is keyed under ``None`` since it has no
        file node yet; its entry records the parents' file nodes and the
        copy source, with a null linknode.
        """
        if not self._ancestormap:
            path = self._path
            pcl = self._changectx._parents
            renamed = self.renamed()

            if renamed:
                # renamed is (copysource path, source filenode).
                p1 = renamed
            else:
                p1 = (path, pcl[0]._manifest.get(path, nullid))

            p2 = (path, nullid)
            if len(pcl) > 1:
                # Merge in progress: second parent may also carry the file.
                p2 = (path, pcl[1]._manifest.get(path, nullid))

            m = {}
            # Merge each parent's full ancestor map into ours.
            if p1[1] != nullid:
                p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
                m.update(p1ctx.filelog().ancestormap(p1[1]))

            if p2[1] != nullid:
                p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                m.update(p2ctx.filelog().ancestormap(p2[1]))

            copyfrom = b''
            if renamed:
                copyfrom = renamed[0]
            # None key = the (nodeless) working-directory file revision.
            m[None] = (p1[1], p2[1], nullid, copyfrom)
            self._ancestormap = m

        return self._ancestormap
General Comments 0
You need to be logged in to leave comments. Login now