##// END OF EJS Templates
remotefilelog: remove pointless return statement from constructor...
Martin von Zweigbergk -
r41396:784ab13b default
parent child Browse files
Show More
@@ -1,491 +1,491 b''
1 # remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
1 # remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import collections
9 import collections
10 import time
10 import time
11
11
12 from mercurial.node import bin, hex, nullid, nullrev
12 from mercurial.node import bin, hex, nullid, nullrev
13 from mercurial import (
13 from mercurial import (
14 ancestor,
14 ancestor,
15 context,
15 context,
16 error,
16 error,
17 phases,
17 phases,
18 pycompat,
18 pycompat,
19 util,
19 util,
20 )
20 )
21 from . import shallowutil
21 from . import shallowutil
22
22
# Short alias: used as a decorator on lazily-computed attributes below.
propertycache = util.propertycache

# Timeout for the fastlog fast path. Unused in this module (the fastlog
# codepath was removed — see the TODO in _adjustlinknode); presumably
# still read by external code, so it is kept — TODO confirm before removal.
FASTLOG_TIMEOUT_IN_SECS = 0.5
25
25
class remotefilectx(context.filectx):
    """filectx implementation for shallow (remotefilelog) repositories.

    The key difference from core filectx is linkrev/linknode handling:
    in a shallow repo the locally-stored linknode may point at a
    changeset that is not an ancestor of the revision being inspected,
    so several methods walk the changelog — and may force a remote
    prefetch — to locate the changeset that actually introduced a file
    revision.
    """

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None, ancestormap=None):
        if fileid == nullrev:
            fileid = nullid
        if fileid and len(fileid) == 40:
            # 40 chars means a hex nodeid; convert to binary form
            fileid = bin(fileid)
        super(remotefilectx, self).__init__(repo, path, changeid,
                                            fileid, filelog, changectx)
        self._ancestormap = ancestormap

    def size(self):
        """Return the size of this file revision, per its filelog."""
        return self._filelog.size(self._filenode)

    @propertycache
    def _changeid(self):
        # NOTE(review): the first branch looks unreachable under
        # propertycache (which only runs when '_changeid' is absent from
        # __dict__) — kept as-is to mirror upstream; confirm before
        # simplifying.
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            linknode = self._adjustlinknode(self._path, self._filelog,
                                            self._filenode,
                                            self._descendantrev)
            return self._repo.unfiltered().changelog.rev(linknode)
        else:
            return self.linkrev()

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return remotefilectx(self._repo, self._path, fileid=fileid,
                             filelog=self._filelog, changeid=changeid)

    def linkrev(self):
        """Return the (possibly cached) linkrev for this file revision."""
        return self._linkrev

    @propertycache
    def _linkrev(self):
        # Resolve the changelog revision for this file revision's
        # linknode, falling back to a full changelog scan if the
        # linknode is unknown locally.
        if self._filenode == nullid:
            return nullrev

        ancestormap = self.ancestormap()
        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        rev = self._repo.changelog.nodemap.get(linknode)
        if rev is not None:
            return rev

        # Search all commits for the appropriate linkrev (slow, but uncommon)
        path = self._path
        fileid = self._filenode
        cl = self._repo.unfiltered().changelog
        mfl = self._repo.manifestlog

        for rev in range(len(cl) - 1, 0, -1):
            node = cl.node(rev)
            data = cl.read(node) # get changeset data (we avoid object creation)
            if path in data[3]: # checking the 'files' field.
                # The file has been touched, check if the hash is what we're
                # looking for.
                if fileid == mfl[data[0]].readfast().get(path):
                    return rev

        # Couldn't find the linkrev. This should generally not happen, and will
        # likely cause a crash.
        return None

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # No originating context to validate against, or the linkrev
            # already matches: trust it.
            return lkr
        linknode = self._adjustlinknode(self._path, self._filelog,
                                        self._filenode, self.rev(),
                                        inclusive=True)
        return self._repo.changelog.rev(linknode)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        if not copyfrom:
            return None

        renamed = (copyfrom, p1)
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # If either parent already has this exact file revision,
                # it was not (re)introduced here as a rename.
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def ancestormap(self):
        """Return (and lazily cache) the ancestor map for this file."""
        if not self._ancestormap:
            self._ancestormap = self.filelog().ancestormap(self._filenode)

        return self._ancestormap

    def parents(self):
        """Return parent remotefilectx instances, built from ancestormap.

        Each parent is tagged with _descendantrev so its linkrev can
        later be lazily adjusted relative to this revision.
        """
        repo = self._repo
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        results = []
        if p1 != nullid:
            # For a copy, p1 lives in the copy-source filelog.
            path = copyfrom or self._path
            flog = repo.file(path)
            p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog,
                                  ancestormap=ancestormap)
            p1ctx._descendantrev = self.rev()
            results.append(p1ctx)

        if p2 != nullid:
            path = self._path
            flog = repo.file(path)
            p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog,
                                  ancestormap=ancestormap)
            p2ctx._descendantrev = self.rev()
            results.append(p2ctx)

        return results

    def _nodefromancrev(self, ancrev, cl, mfl, path, fnode):
        """returns the node for <path> in <ancrev> if content matches <fnode>"""
        ancctx = cl.read(ancrev) # This avoids object creation.
        manifestnode, files = ancctx[0], ancctx[3]
        # If the file was touched in this ancestor, and the content is similar
        # to the one we are searching for.
        if path in files and fnode == mfl[manifestnode].readfast().get(path):
            return cl.node(ancrev)
        return None

    def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked

        Note: This is based on adjustlinkrev in core, but it's quite different.

        adjustlinkrev depends on the fact that the linkrev is the bottom most
        node, and uses that as a stopping point for the ancestor traversal. We
        can't do that here because the linknode is not guaranteed to be the
        bottom most one.

        In our code here, we actually know what a bunch of potential ancestor
        linknodes are, so instead of stopping the cheap-ancestor-traversal when
        we get to a linkrev, we stop when we see any of the known linknodes.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        ancestormap = self.ancestormap()
        linknode = ancestormap[fnode][2]

        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]

        if self._verifylinknode(revs, linknode):
            # Fast path: stored linknode is already an ancestor of revs.
            return linknode

        commonlogkwargs = {
            r'revs': ' '.join([hex(cl.node(rev)) for rev in revs]),
            r'fnode': hex(fnode),
            r'filepath': path,
            r'user': shallowutil.getusername(repo.ui),
            r'reponame': shallowutil.getreponame(repo.ui),
        }

        repo.ui.log('linkrevfixup', 'adjusting linknode\n', **commonlogkwargs)

        pc = repo._phasecache
        seenpublic = False
        iteranc = cl.ancestors(revs, inclusive=inclusive)
        for ancrev in iteranc:
            # First, check locally-available history.
            lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode)
            if lnode is not None:
                return lnode

            # adjusting linknode can be super-slow. To mitigate the issue
            # we use two heuristics: calling fastlog and forcing remotefilelog
            # prefetch
            if not seenpublic and pc.phase(repo, ancrev) == phases.public:
                # TODO: there used to be a codepath to fetch linknodes
                # from a server as a fast path, but it appeared to
                # depend on an API FB added to their phabricator.
                lnode = self._forceprefetch(repo, path, fnode, revs,
                                            commonlogkwargs)
                if lnode:
                    return lnode
                seenpublic = True

        return linknode

    def _forceprefetch(self, repo, path, fnode, revs,
                       commonlogkwargs):
        # This next part is super non-obvious, so big comment block time!
        #
        # It is possible to get extremely bad performance here when a fairly
        # common set of circumstances occur when this extension is combined
        # with a server-side commit rewriting extension like pushrebase.
        #
        # First, an engineer creates Commit A and pushes it to the server.
        # While the server's data structure will have the correct linkrev
        # for the files touched in Commit A, the client will have the
        # linkrev of the local commit, which is "invalid" because it's not
        # an ancestor of the main line of development.
        #
        # The client will never download the remotefilelog with the correct
        # linkrev as long as nobody else touches that file, since the file
        # data and history hasn't changed since Commit A.
        #
        # After a long time (or a short time in a heavily used repo), if the
        # same engineer returns to change the same file, some commands --
        # such as amends of commits with file moves, logs, diffs, etc --
        # can trigger this _adjustlinknode code. In those cases, finding
        # the correct rev can become quite expensive, as the correct
        # revision is far back in history and we need to walk back through
        # history to find it.
        #
        # In order to improve this situation, we force a prefetch of the
        # remotefilelog data blob for the file we were called on. We do this
        # at most once, when we first see a public commit in the history we
        # are traversing.
        #
        # Forcing the prefetch means we will download the remote blob even
        # if we have the "correct" blob in the local store. Since the union
        # store checks the remote store first, this means we are much more
        # likely to get the correct linkrev at this point.
        #
        # In rare circumstances (such as the server having a suboptimal
        # linkrev for our use case), we will fall back to the old slow path.
        #
        # We may want to add additional heuristics here in the future if
        # the slow path is used too much. One promising possibility is using
        # obsolescence markers to find a more-likely-correct linkrev.

        logmsg = ''
        start = time.time()
        try:
            repo.fileservice.prefetch([(path, hex(fnode))], force=True)

            # Now that we've downloaded a new blob from the server,
            # we need to rebuild the ancestor map to recompute the
            # linknodes.
            self._ancestormap = None
            linknode = self.ancestormap()[fnode][2] # 2 is linknode
            if self._verifylinknode(revs, linknode):
                logmsg = 'remotefilelog prefetching succeeded'
                return linknode
            logmsg = 'remotefilelog prefetching not found'
            return None
        except Exception as e:
            logmsg = 'remotefilelog prefetching failed (%s)' % e
            return None
        finally:
            elapsed = time.time() - start
            repo.ui.log('linkrevfixup', logmsg + '\n', elapsed=elapsed * 1000,
                        **pycompat.strkwargs(commonlogkwargs))

    def _verifylinknode(self, revs, linknode):
        """
        Check if a linknode is correct one for the current history.

        That is, return True if the linkrev is the ancestor of any of the
        passed in revs, otherwise return False.

        `revs` is a list that usually has one element -- usually the wdir parent
        or the user-passed rev we're looking back from. It may contain two revs
        when there is a merge going on, or zero revs when a root node with no
        parents is being created.
        """
        if not revs:
            return False
        try:
            # Use the C fastpath to check if the given linknode is correct.
            cl = self._repo.unfiltered().changelog
            return any(cl.isancestor(linknode, cl.node(r)) for r in revs)
        except error.LookupError:
            # The linknode read from the blob may have been stripped or
            # otherwise not present in the repository anymore. Do not fail hard
            # in this case. Instead, return false and continue the search for
            # the correct linknode.
            return False

    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs (excluding self), sorted by linkrev.

        BFS over the file DAG, deduplicated by filenode. When
        followfirst is true, only first parents are followed.
        """
        ancestors = []
        queue = collections.deque((self,))
        seen = set()
        while queue:
            current = queue.pop()
            if current.filenode() in seen:
                continue
            seen.add(current.filenode())

            ancestors.append(current)

            parents = current.parents()
            first = True
            for p in parents:
                if first or not followfirst:
                    queue.append(p)
                first = False

        # Remove self
        ancestors.pop(0)

        # Sort by linkrev
        # The copy tracing algorithm depends on these coming out in order
        ancestors = sorted(ancestors, reverse=True, key=lambda x:x.linkrev())

        for ancestor in ancestors:
            yield ancestor

    def ancestor(self, fc2, actx):
        """Return the common ancestor filectx of self and fc2 in actx."""
        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]

        # the next easiest cases: unambiguous predecessor (name trumps
        # history)
        if self.path() in actx and fc2.path() not in actx:
            return actx[self.path()]
        if fc2.path() in actx and self.path() not in actx:
            return actx[fc2.path()]

        # do a full traversal
        amap = self.ancestormap()
        bmap = fc2.ancestormap()

        def parents(x):
            # x is a (path, filenode) pair; look up its parents in
            # whichever ancestor map knows about it.
            f, n = x
            p = amap.get(n) or bmap.get(n)
            if not p:
                return []

            return [(p[3] or f, p[0]), (f, p[1])]

        a = (self.path(), self.filenode())
        b = (fc2.path(), fc2.filenode())
        result = ancestor.genericancestor(a, b, parents)
        if result:
            f, n = result
            r = remotefilectx(self._repo, f, fileid=n,
                              ancestormap=amap)
            return r

        return None

    def annotate(self, *args, **kwargs):
        """Prefetch the revisions annotate will need, then delegate."""
        introctx = self
        prefetchskip = kwargs.pop(r'prefetchskip', None)
        if prefetchskip:
            # use introrev so prefetchskip can be accurately tested
            introrev = self.introrev()
            if self.rev() != introrev:
                introctx = remotefilectx(self._repo, self._path,
                                         changeid=introrev,
                                         fileid=self._filenode,
                                         filelog=self._filelog,
                                         ancestormap=self._ancestormap)

        # like self.ancestors, but append to "fetch" and skip visiting parents
        # of nodes in "prefetchskip".
        fetch = []
        seen = set()
        queue = collections.deque((introctx,))
        seen.add(introctx.node())
        while queue:
            current = queue.pop()
            if current.filenode() != self.filenode():
                # this is a "joint point". fastannotate needs contents of
                # "joint point"s to calculate diffs for side branches.
                fetch.append((current.path(), hex(current.filenode())))
            if prefetchskip and current in prefetchskip:
                continue
            for parent in current.parents():
                if parent.node() not in seen:
                    seen.add(parent.node())
                    queue.append(parent)

        self._repo.ui.debug('remotefilelog: prefetching %d files '
                            'for annotate\n' % len(fetch))
        if fetch:
            self._repo.fileservice.prefetch(fetch)
        return super(remotefilectx, self).annotate(*args, **kwargs)

    # Return empty set so that the hg serve and thg don't stack trace
    def children(self):
        return []
451
451
class remoteworkingfilectx(context.workingfilectx, remotefilectx):
    """workingfilectx for shallow repos.

    Combines core workingfilectx behavior with remotefilectx's
    ancestormap-based parent/linkrev resolution.
    """

    def __init__(self, repo, path, filelog=None, workingctx=None):
        self._ancestormap = None
        # Note: no 'return' here — __init__ must return None.
        super(remoteworkingfilectx, self).__init__(repo, path, filelog,
                                                   workingctx)

    def parents(self):
        # Use the remotefilelog-aware parents(), not workingfilectx's.
        return remotefilectx.parents(self)

    def ancestormap(self):
        """Build (and cache) an ancestor map for the working copy file.

        The working-copy entry is keyed by None, since it has no
        filenode yet; its parents come from the dirstate parents'
        manifests (or from rename metadata for a copied file).
        """
        if not self._ancestormap:
            path = self._path
            pcl = self._changectx._parents
            renamed = self.renamed()

            if renamed:
                # renamed is (copysource path, source filenode)
                p1 = renamed
            else:
                p1 = (path, pcl[0]._manifest.get(path, nullid))

            p2 = (path, nullid)
            if len(pcl) > 1:
                # Merge in progress: second dirstate parent exists.
                p2 = (path, pcl[1]._manifest.get(path, nullid))

            m = {}
            if p1[1] != nullid:
                p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
                m.update(p1ctx.filelog().ancestormap(p1[1]))

            if p2[1] != nullid:
                p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                m.update(p2ctx.filelog().ancestormap(p2[1]))

            copyfrom = ''
            if renamed:
                copyfrom = renamed[0]
            m[None] = (p1[1], p2[1], nullid, copyfrom)
            self._ancestormap = m

        return self._ancestormap
General Comments 0
You need to be logged in to leave comments. Login now