Commit r52196:e5b710ce (default branch) by marmoute — "remotefilelog: drop dead code".
Diff hunk header: @@ -1,502 +1,473 b''
1 # remotefilelog.py - filelog implementation where filelog history is stored
1 # remotefilelog.py - filelog implementation where filelog history is stored
2 # remotely
2 # remotely
3 #
3 #
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import os
11
10
12 from mercurial.node import bin
11 from mercurial.node import bin
13 from mercurial.i18n import _
12 from mercurial.i18n import _
14 from mercurial import (
13 from mercurial import (
15 ancestor,
14 ancestor,
16 error,
15 error,
17 mdiff,
16 mdiff,
18 revlog,
17 revlog,
19 )
18 )
20 from mercurial.utils import storageutil
19 from mercurial.utils import storageutil
21 from mercurial.revlogutils import flagutil
20 from mercurial.revlogutils import flagutil
22
21
23 from . import (
22 from . import (
24 constants,
23 constants,
25 fileserverclient,
26 shallowutil,
24 shallowutil,
27 )
25 )
28
26
29
27
class remotefilelognodemap:
    """Minimal node-map facade over a remotefilelog content store.

    Membership is answered by asking the store which (filename, node)
    pairs it is missing: an empty "missing" answer means present.
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # Present iff the store reports nothing missing for this pair.
        missing = self._store.getmissing([(self._filename, node)])
        return not missing

    def __get__(self, node):
        # Identity lookup: a known node maps to itself.
        if node not in self:
            raise KeyError(node)
        return node
43
41
44
42
class remotefilelog:
    """Filelog implementation whose file history is stored remotely.

    Revision contents come from ``repo.contentstore`` and ancestry /
    copy metadata from ``repo.metadatastore`` instead of a local revlog.
    Consequently several revlog APIs are faked (``heads``, ``rev``) or
    rejected outright (``__len__`` for most files), and "revs" handled
    here are actually nodes.
    """

    _flagserrorclass = error.RevlogError

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1

        self._flagprocessors = dict(flagutil.flagprocessors)

    def read(self, node):
        """returns the file contents at this node"""
        text = self.revision(node)
        if not text.startswith(b'\1\n'):
            return text
        # Drop the "\1\n...\1\n" filelog copy-metadata header.
        end = text.index(b'\1\n', 2)
        return text[end + 2 :]

    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(
            text, meta.get(b'copy'), meta.get(b'copyrev')
        )
        node = storageutil.hashrevisionsha1(hashtext, p1, p2)
        return self.addrevision(
            hashtext, transaction, linknode, p1, p2, node=node
        )

    def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
        """Serialize one revision plus its whole ancestor list into the
        remotefilelog blob format.

        ``text`` here does not include filelog metadata; copy metadata
        lives in the ancestor section instead.
        """
        header = shallowutil.buildfileblobheader(len(text), flags)
        data = b"%s\0%s" % (header, text)

        realp1 = p1
        copyfrom = b""
        if meta and b'copy' in meta:
            # For a copy, p1 ancestry continues in the source file.
            copyfrom = meta[b'copy']
            realp1 = bin(meta[b'copyrev'])

        data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

        seen = set()
        pancestors = {}
        pending = collections.deque()
        if realp1 != self.repo.nullid:
            p1flog = self
            if copyfrom:
                p1flog = remotefilelog(self.opener, copyfrom, self.repo)

            pancestors.update(p1flog.ancestormap(realp1))
            pending.append(realp1)
            seen.add(realp1)
        if p2 != self.repo.nullid:
            pancestors.update(self.ancestormap(p2))
            pending.append(p2)
            seen.add(p2)

        # Emit the ancestors in topological (breadth-first) order.
        ancestortext = b""
        while pending:
            cur = pending.popleft()
            pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[cur]

            pacopyfrom = pacopyfrom or b''
            ancestortext += b"%s%s%s%s%s\0" % (
                cur,
                pa1,
                pa2,
                ancestorlinknode,
                pacopyfrom,
            )

            if pa1 != self.repo.nullid and pa1 not in seen:
                pending.append(pa1)
                seen.add(pa1)
            if pa2 != self.repo.nullid and pa2 not in seen:
                pending.append(pa2)
                seen.add(pa2)

        return data + ancestortext

    def addrevision(
        self,
        text,
        transaction,
        linknode,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        sidedata=None,
    ):
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = flagutil.processflagswrite(
            self,
            text,
            flags,
        )
        return self.addrawrevision(
            rawtext,
            transaction,
            linknode,
            p1,
            p2,
            node,
            flags,
            cachedelta,
            _metatuple=(meta, metaoffset),
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext
        data = self._createfileblob(
            blobtext, meta, flags, p1, p2, node, linknode
        )
        self.repo.contentstore.addremotefilelognode(self.filename, node, data)

        return node

    def renamed(self, node):
        """Return (copysource, sourcenode) if this revision is a copy,
        else False."""
        ancestors = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestors[node]
        return (copyfrom, p1) if copyfrom else False

    def size(self, node):
        """return the size of a given revision"""
        return len(self.read(node))

    rawsize = size

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        if node == self.repo.nullid:
            return True

        return self.read(node) != text

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        if self.filename in (b'.hgtags', b'.hgsub', b'.hgsubstate'):
            # Global tag and subrepository support require access to the
            # file history for various performance sensitive operations.
            # excludepattern should be used for repositories depending on
            # those features to fallback to regular filelog.
            return 0

        raise RuntimeError(b"len not supported")

    def heads(self):
        # Fake heads of the filelog to satisfy hgweb.
        return []

    def empty(self):
        return False

    def flags(self, node):
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        if node == self.repo.nullid:
            return self.repo.nullid, self.repo.nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            # Copies report a null p1; the real source is in renamed().
            p1 = self.repo.nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        return self.repo.unfiltered().changelog.rev(self.linknode(node))

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltaprevious=False,
        deltamode=None,
        sidedata_helpers=None,
        debug_info=None,
    ):
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                # NOTE(review): basenode is only assigned on the first
                # iteration and then carried forward unchanged — confirm
                # that is intended before restructuring this loop.
                basenode = prevnode = p1
            if basenode == node:
                basenode = self.repo.nullid
            if basenode != self.repo.nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
                # Sidedata is not supported yet
                sidedata=None,
                # Protocol flags are not used yet
                protocol_flags=0,
            )

    def revdiff(self, node1, node2):
        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))

    def lookup(self, node):
        # Accept a 40-char hex string or a 20-byte binary node.
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid lookup input')
            )

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev

    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.
        """
        if node == self.repo.nullid:
            return b""
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid revision input')
            )
        if (
            node == self.repo.nodeconstants.wdirid
            or node in self.repo.nodeconstants.wdirfilenodeids
        ):
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        return flagutil.processflagsread(self, rawtext, flags)[0]

    def rawdata(self, node):
        # NOTE(review): raw=False looks surprising for a "rawdata" method
        # but matches the original implementation — confirm before changing.
        return self.revision(node, raw=False)

    def ancestormap(self, node):
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        if a == self.repo.nullid or b == self.repo.nullid:
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in revmap.items()}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return self.repo.nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        if a == self.repo.nullid or b == self.repo.nullid:
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in revmap.items()}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in mapping.items():
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != self.repo.nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != self.repo.nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        queue = collections.deque(
            ((None, n) for n in parentsmap if n not in allparents)
        )
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__

    def strip(self, minlink, transaction):
        pass

    # misc unused things
    def files(self):
        return []

    def checksize(self):
        return 0, 0
General Comments 0
You need to be logged in to leave comments. Login now