##// END OF EJS Templates
remotefilelog: remove deprecated API...
Raphaël Gomès -
r49358:bf5dc156 default
parent child Browse files
Show More
@@ -1,516 +1,504 b''
1 # remotefilelog.py - filelog implementation where filelog history is stored
1 # remotefilelog.py - filelog implementation where filelog history is stored
2 # remotely
2 # remotely
3 #
3 #
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import os
11 import os
12
12
13 from mercurial.node import bin
13 from mercurial.node import bin
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import (
15 from mercurial import (
16 ancestor,
16 ancestor,
17 error,
17 error,
18 mdiff,
18 mdiff,
19 pycompat,
19 pycompat,
20 revlog,
20 revlog,
21 util,
22 )
21 )
23 from mercurial.utils import storageutil
22 from mercurial.utils import storageutil
24 from mercurial.revlogutils import flagutil
23 from mercurial.revlogutils import flagutil
25
24
26 from . import (
25 from . import (
27 constants,
26 constants,
28 fileserverclient,
27 fileserverclient,
29 shallowutil,
28 shallowutil,
30 )
29 )
31
30
32
31
class remotefilelognodemap(object):
    """Minimal node-membership map backed by a remotefilelog content store.

    Mimics just enough of a revlog nodemap for callers that only test
    membership: a node is "present" when the content store reports no
    missing entries for (filename, node).
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # getmissing() returns the subset of keys the store cannot serve;
        # an empty result means the node is available.
        return not self._store.getmissing([(self._filename, node)])

    def __get__(self, node):
        # Identity lookup: nodes map to themselves, but unknown nodes
        # raise KeyError like a real mapping would.
        if node in self:
            return node
        raise KeyError(node)
47
46
class remotefilelog(object):
    """Filelog implementation whose revision history lives remotely.

    Presents (a useful subset of) the vanilla filelog interface while
    fetching file blobs from the repo's content/metadata stores (local
    cache, shared cache, or file server) instead of a local revlog.

    Note: several methods deliberately operate on *nodes* where a revlog
    would use integer revs (see ``rev``/``node``/``parentrevs``); integer
    revs are rejected explicitly.
    """

    _generaldelta = True
    _flagserrorclass = error.RevlogError

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1

        self._flagprocessors = dict(flagutil.flagprocessors)

    def read(self, node):
        """returns the file contents at this node"""
        t = self.revision(node)
        if not t.startswith(b'\1\n'):
            return t
        # strip the filelog copy-metadata header (b'\1\n'...b'\1\n')
        s = t.index(b'\1\n', 2)
        return t[s + 2 :]

    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        """Add a revision; returns its node.

        ``meta`` may carry b'copy'/b'copyrev' rename information, which is
        folded into the hashed text exactly like vanilla filelogs do.
        """
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(
            text, meta.get(b'copy'), meta.get(b'copyrev')
        )
        node = storageutil.hashrevisionsha1(hashtext, p1, p2)
        return self.addrevision(
            hashtext, transaction, linknode, p1, p2, node=node
        )

    def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
        """Serialize a revision plus its full ancestry into one fileblob.

        Layout: header NUL text, then one b"%s%s%s%s%s\\0" record per
        ancestor (node, p1, p2, linknode, copyfrom), emitted in
        breadth-first (topological) order starting from this revision.
        """
        # text passed to "_createfileblob" does not include filelog metadata
        header = shallowutil.buildfileblobheader(len(text), flags)
        data = b"%s\0%s" % (header, text)

        realp1 = p1
        copyfrom = b""
        if meta and b'copy' in meta:
            # renames store the source file's node as the effective p1
            copyfrom = meta[b'copy']
            realp1 = bin(meta[b'copyrev'])

        data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

        visited = set()

        pancestors = {}
        queue = []
        if realp1 != self.repo.nullid:
            p1flog = self
            if copyfrom:
                # ancestry of a rename continues in the source filelog
                p1flog = remotefilelog(self.opener, copyfrom, self.repo)

            pancestors.update(p1flog.ancestormap(realp1))
            queue.append(realp1)
            visited.add(realp1)
        if p2 != self.repo.nullid:
            pancestors.update(self.ancestormap(p2))
            queue.append(p2)
            visited.add(p2)

        ancestortext = b""

        # add the ancestors in topological order
        while queue:
            c = queue.pop(0)
            pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

            pacopyfrom = pacopyfrom or b''
            ancestortext += b"%s%s%s%s%s\0" % (
                c,
                pa1,
                pa2,
                ancestorlinknode,
                pacopyfrom,
            )

            if pa1 != self.repo.nullid and pa1 not in visited:
                queue.append(pa1)
                visited.add(pa1)
            if pa2 != self.repo.nullid and pa2 not in visited:
                queue.append(pa2)
                visited.add(pa2)

        data += ancestortext

        return data

    def addrevision(
        self,
        text,
        transaction,
        linknode,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        sidedata=None,
    ):
        """Add a revision whose ``text`` includes the hg filelog metadata
        header; runs write-side flag processors before storing."""
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = flagutil.processflagswrite(
            self,
            text,
            flags,
        )
        return self.addrawrevision(
            rawtext,
            transaction,
            linknode,
            p1,
            p2,
            node,
            flags,
            cachedelta,
            _metatuple=(meta, metaoffset),
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        """Store an already-flag-processed revision as a fileblob.

        ``_metatuple`` is a private fast path used by ``addrevision`` when
        the filelog metadata has already been parsed with confidence.
        """
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext
        data = self._createfileblob(
            blobtext, meta, flags, p1, p2, node, linknode
        )
        self.repo.contentstore.addremotefilelognode(self.filename, node, data)

        return node

    def renamed(self, node):
        """Return (copysource, copynode) if this node is a rename, else
        False."""
        ancestors = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestors[node]
        if copyfrom:
            return (copyfrom, p1)

        return False

    def size(self, node):
        """return the size of a given revision"""
        return len(self.read(node))

    rawsize = size

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        if node == self.repo.nullid:
            return True

        nodetext = self.read(node)
        return nodetext != text

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        if self.filename == b'.hgtags':
            # The length of .hgtags is used to fast path tag checking.
            # remotefilelog doesn't support .hgtags since the entire .hgtags
            # history is needed. Use the excludepattern setting to make
            # .hgtags a normal filelog.
            return 0

        raise RuntimeError(b"len not supported")

    def heads(self):
        # Fake heads of the filelog to satisfy hgweb.
        return []

    def empty(self):
        return False

    def flags(self, node):
        """Return the revlog flags stored for ``node`` (0 when absent)."""
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        """Return (p1, p2) for ``node``; renames report nullid as p1 (the
        real copy source is exposed via ``renamed``)."""
        if node == self.repo.nullid:
            return self.repo.nullid, self.repo.nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            p1 = self.repo.nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        """Return the changelog node this file revision is linked to."""
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        return self.repo.unfiltered().changelog.rev(self.linknode(node))

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltaprevious=False,
        deltamode=None,
        sidedata_helpers=None,
    ):
        """Yield revlogrevisiondelta objects for ``nodes``.

        Each revision is emitted either as a delta against its first
        parent or as a full rawdata payload when no usable base exists.
        """
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                basenode = prevnode = p1
            if basenode == node:
                basenode = self.repo.nullid
            if basenode != self.repo.nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
                # Sidedata is not supported yet
                sidedata=None,
                # Protocol flags are not used yet
                protocol_flags=0,
            )

    def revdiff(self, node1, node2):
        """Return a binary diff transforming node1's data into node2's."""
        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))

    def lookup(self, node):
        """Normalize a 40-char hex or 20-byte binary node; raises
        LookupError on anything else."""
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid lookup input')
            )

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev

    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.
        """
        if node == self.repo.nullid:
            return b""
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid revision input')
            )
        if (
            node == self.repo.nodeconstants.wdirid
            or node in self.repo.nodeconstants.wdirfilenodeids
        ):
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        return flagutil.processflagsread(self, rawtext, flags)[0]

    def rawdata(self, node):
        # NOTE(review): raw=False here (matching upstream), so read-side
        # flag processors DO run for "rawdata" — confirm this is intended.
        return self.revision(node, raw=False)

    def _read(self, id):
        """reads the raw file blob from disk, cache, or server"""
        fileservice = self.repo.fileservice
        localcache = fileservice.localcache
        cachekey = fileserverclient.getcachekey(
            self.repo.name, self.filename, id
        )
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        localkey = fileserverclient.getlocalkey(self.filename, id)
        # NOTE(review): self.localpath is not assigned anywhere in this
        # class — presumably set externally; verify before relying on it.
        localpath = os.path.join(self.localpath, localkey)
        try:
            return shallowutil.readfile(localpath)
        except IOError:
            pass

        # last resort: fetch from the server, then retry the shared cache
        fileservice.prefetch([(self.filename, id)])
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        raise error.LookupError(id, self.filename, _(b'no node'))

    def ancestormap(self, node):
        """Return {node: (p1, p2, linknode, copyfrom)} for the full
        ancestry of ``node``."""
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        """Return the greatest common ancestor node of ``a`` and ``b``."""
        if a == self.repo.nullid or b == self.repo.nullid:
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return self.repo.nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""

        if a == self.repo.nullid or b == self.repo.nullid:
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in pycompat.iteritems(mapping):
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != self.repo.nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != self.repo.nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        queue = collections.deque(
            ((None, n) for n in parentsmap if n not in allparents)
        )
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__

    def strip(self, minlink, transaction):
        # stripping is a no-op: history lives remotely
        pass

    # misc unused things
    def files(self):
        return []

    def checksize(self):
        return 0, 0
General Comments 0
You need to be logged in to leave comments. Login now