flagutil: use the new mixin use in remotefilelog...
marmoute
r43141:a5c08896 default
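This changeset removes remotefilelog's private copy of _processflags() and instead inherits the flag-processing machinery from mercurial.revlogutils.flagutil.flagprocessorsmixin, seeding a per-instance _flagprocessors registry from the global flagutil.flagprocessors table. A minimal sketch of the pattern follows; the mixin body and FLAGPROCESSORS below are simplified stand-ins, not the actual flagutil code.

    # Illustrative sketch only: the real mixin lives in
    # mercurial/revlogutils/flagutil.py. FLAGPROCESSORS and the mixin body
    # are simplified stand-ins, not Mercurial's implementation.
    FLAGPROCESSORS = {}  # stand-in for the global flagutil.flagprocessors table

    class flagprocessorsmixin(object):
        def _processflags(self, text, flags, operation, raw=False):
            # apply each registered (read, write, raw) processor triple whose
            # flag bit is set, transforming ``text`` for the given operation
            validatehash = True
            for flag, (readfn, writefn, rawfn) in self._flagprocessors.items():
                if not flag & flags:
                    continue
                if raw:
                    vhash = rawfn(self, text)
                elif operation == 'read':
                    text, vhash = readfn(self, text)
                else:  # 'write'
                    text, vhash = writefn(self, text)
                validatehash = validatehash and vhash
            return text, validatehash

    class remotefilelog(flagprocessorsmixin):
        def __init__(self):
            # copy the global registry so per-store registrations stay local
            self._flagprocessors = dict(FLAGPROCESSORS)

With the mixin in place, the call sites in addrevision() and revision() below keep invoking self._processflags(...) unchanged; only the duplicated implementation goes away.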
@@ -1,462 +1,443 @@
 # remotefilelog.py - filelog implementation where filelog history is stored
 # remotely
 #
 # Copyright 2013 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import

 import collections
 import os

 from mercurial.node import (
     bin,
     nullid,
     wdirfilenodeids,
     wdirid,
 )
 from mercurial.i18n import _
 from mercurial import (
     ancestor,
     error,
     mdiff,
     revlog,
 )
 from mercurial.utils import storageutil
+from mercurial.revlogutils import flagutil

 from . import (
     constants,
     fileserverclient,
     shallowutil,
 )

 class remotefilelognodemap(object):
     def __init__(self, filename, store):
         self._filename = filename
         self._store = store

     def __contains__(self, node):
         missing = self._store.getmissing([(self._filename, node)])
         return not bool(missing)

     def __get__(self, node):
         if node not in self:
             raise KeyError(node)
         return node

-class remotefilelog(object):
+class remotefilelog(flagutil.flagprocessorsmixin):

     _generaldelta = True

     def __init__(self, opener, path, repo):
         self.opener = opener
         self.filename = path
         self.repo = repo
         self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

         self.version = 1

+        self._flagprocessors = dict(flagutil.flagprocessors)
+
     def read(self, node):
         """returns the file contents at this node"""
         t = self.revision(node)
         if not t.startswith('\1\n'):
             return t
         s = t.index('\1\n', 2)
         return t[s + 2:]

     def add(self, text, meta, transaction, linknode, p1=None, p2=None):
         # hash with the metadata, like in vanilla filelogs
         hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
                                                 meta.get('copyrev'))
         node = storageutil.hashrevisionsha1(hashtext, p1, p2)
         return self.addrevision(hashtext, transaction, linknode, p1, p2,
                                 node=node)

     def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
         # text passed to "_createfileblob" does not include filelog metadata
         header = shallowutil.buildfileblobheader(len(text), flags)
         data = "%s\0%s" % (header, text)

         realp1 = p1
         copyfrom = ""
         if meta and 'copy' in meta:
             copyfrom = meta['copy']
             realp1 = bin(meta['copyrev'])

         data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

         visited = set()

         pancestors = {}
         queue = []
         if realp1 != nullid:
             p1flog = self
             if copyfrom:
                 p1flog = remotefilelog(self.opener, copyfrom, self.repo)

             pancestors.update(p1flog.ancestormap(realp1))
             queue.append(realp1)
             visited.add(realp1)
         if p2 != nullid:
             pancestors.update(self.ancestormap(p2))
             queue.append(p2)
             visited.add(p2)

         ancestortext = ""

         # add the ancestors in topological order
         while queue:
             c = queue.pop(0)
             pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

             pacopyfrom = pacopyfrom or ''
             ancestortext += "%s%s%s%s%s\0" % (
                 c, pa1, pa2, ancestorlinknode, pacopyfrom)

             if pa1 != nullid and pa1 not in visited:
                 queue.append(pa1)
                 visited.add(pa1)
             if pa2 != nullid and pa2 not in visited:
                 queue.append(pa2)
                 visited.add(pa2)

         data += ancestortext

         return data

     def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
         # text passed to "addrevision" includes hg filelog metadata header
         if node is None:
             node = storageutil.hashrevisionsha1(text, p1, p2)

         meta, metaoffset = storageutil.parsemeta(text)
         rawtext, validatehash = self._processflags(text, flags, 'write')
         return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
                                    node, flags, cachedelta,
                                    _metatuple=(meta, metaoffset))

     def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
                        flags, cachedelta=None, _metatuple=None):
         if _metatuple:
             # _metatuple: used by "addrevision" internally by remotefilelog
             # meta was parsed confidently
             meta, metaoffset = _metatuple
         else:
             # not from self.addrevision, but something else (repo._filecommit)
             # calls addrawrevision directly. remotefilelog needs to get and
             # strip filelog metadata.
             # we don't have confidence about whether rawtext contains filelog
             # metadata or not (flag processor could replace it), so we just
             # parse it as best-effort.
             # in LFS (flags != 0)'s case, the best way is to call LFS code to
             # get the meta information, instead of storageutil.parsemeta.
             meta, metaoffset = storageutil.parsemeta(rawtext)
         if flags != 0:
             # when flags != 0, be conservative and do not mangle rawtext, since
             # a read flag processor expects the text not being mangled at all.
             metaoffset = 0
         if metaoffset:
             # remotefilelog fileblob stores copy metadata in its ancestortext,
             # not its main blob. so we need to remove filelog metadata
             # (containing copy information) from text.
             blobtext = rawtext[metaoffset:]
         else:
             blobtext = rawtext
         data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
                                     linknode)
         self.repo.contentstore.addremotefilelognode(self.filename, node, data)

         return node

     def renamed(self, node):
         ancestors = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestors[node]
         if copyfrom:
             return (copyfrom, p1)

         return False

     def size(self, node):
         """return the size of a given revision"""
         return len(self.read(node))

     rawsize = size

     def cmp(self, node, text):
         """compare text with a given file revision

         returns True if text is different than what is stored.
         """

         if node == nullid:
             return True

         nodetext = self.read(node)
         return nodetext != text

     def __nonzero__(self):
         return True

     __bool__ = __nonzero__

     def __len__(self):
         if self.filename == '.hgtags':
             # The length of .hgtags is used to fast path tag checking.
             # remotefilelog doesn't support .hgtags since the entire .hgtags
             # history is needed. Use the excludepattern setting to make
             # .hgtags a normal filelog.
             return 0

         raise RuntimeError("len not supported")

     def empty(self):
         return False

     def flags(self, node):
         if isinstance(node, int):
             raise error.ProgrammingError(
                 'remotefilelog does not accept integer rev for flags')
         store = self.repo.contentstore
         return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

     def parents(self, node):
         if node == nullid:
             return nullid, nullid

         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         if copyfrom:
             p1 = nullid

         return p1, p2

     def parentrevs(self, rev):
         # TODO(augie): this is a node and should be a rev, but for now
         # nothing in core seems to actually break.
         return self.parents(rev)

     def linknode(self, node):
         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         return linknode

     def linkrev(self, node):
         return self.repo.unfiltered().changelog.rev(self.linknode(node))

     def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                       assumehaveparentrevisions=False, deltaprevious=False,
                       deltamode=None):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
         del deltamode
         prevnode = None
         for node in nodes:
             p1, p2 = self.parents(node)
             if prevnode is None:
                 basenode = prevnode = p1
             if basenode == node:
                 basenode = nullid
             if basenode != nullid:
                 revision = None
                 delta = self.revdiff(basenode, node)
             else:
                 revision = self.rawdata(node)
                 delta = None
             yield revlog.revlogrevisiondelta(
                 node=node,
                 p1node=p1,
                 p2node=p2,
                 linknode=self.linknode(node),
                 basenode=basenode,
                 flags=self.flags(node),
                 baserevisionsize=None,
                 revision=revision,
                 delta=delta,
                 )

     def revdiff(self, node1, node2):
         return mdiff.textdiff(self.rawdata(node1),
                               self.rawdata(node2))

     def lookup(self, node):
         if len(node) == 40:
             node = bin(node)
         if len(node) != 20:
             raise error.LookupError(node, self.filename,
                                     _('invalid lookup input'))

         return node

     def rev(self, node):
         # This is a hack to make TortoiseHG work.
         return node

     def node(self, rev):
         # This is a hack.
         if isinstance(rev, int):
             raise error.ProgrammingError(
                 'remotefilelog does not convert integer rev to node')
         return rev

     def revision(self, node, raw=False):
         """returns the revlog contents at this node.
         this includes the meta data traditionally included in file revlogs.
         this is generally only used for bundling and communicating with vanilla
         hg clients.
         """
         if node == nullid:
             return ""
         if len(node) != 20:
             raise error.LookupError(node, self.filename,
                                     _('invalid revision input'))
         if node == wdirid or node in wdirfilenodeids:
             raise error.WdirUnsupported

         store = self.repo.contentstore
         rawtext = store.get(self.filename, node)
         if raw:
             return rawtext
         flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
         if flags == 0:
             return rawtext
         text, verifyhash = self._processflags(rawtext, flags, 'read')
         return text

     def rawdata(self, node):
         return self.revision(node, raw=False)

-    def _processflags(self, text, flags, operation, raw=False):
-        # mostly copied from hg/mercurial/revlog.py
-        validatehash = True
-        orderedflags = revlog.REVIDX_FLAGS_ORDER
-        if operation == 'write':
-            orderedflags = reversed(orderedflags)
-        for flag in orderedflags:
-            if flag & flags:
-                vhash = True
-                if flag not in revlog._flagprocessors:
-                    message = _("missing processor for flag '%#x'") % (flag)
-                    raise revlog.RevlogError(message)
-                readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
-                if raw:
-                    vhash = rawfunc(self, text)
-                elif operation == 'read':
-                    text, vhash = readfunc(self, text)
-                elif operation == 'write':
-                    text, vhash = writefunc(self, text)
-                validatehash = validatehash and vhash
-        return text, validatehash
-
     def _read(self, id):
         """reads the raw file blob from disk, cache, or server"""
         fileservice = self.repo.fileservice
         localcache = fileservice.localcache
         cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
                                                 id)
         try:
             return localcache.read(cachekey)
         except KeyError:
             pass

         localkey = fileserverclient.getlocalkey(self.filename, id)
         localpath = os.path.join(self.localpath, localkey)
         try:
             return shallowutil.readfile(localpath)
         except IOError:
             pass

         fileservice.prefetch([(self.filename, id)])
         try:
             return localcache.read(cachekey)
         except KeyError:
             pass

         raise error.LookupError(id, self.filename, _('no node'))

     def ancestormap(self, node):
         return self.repo.metadatastore.getancestors(self.filename, node)

     def ancestor(self, a, b):
         if a == nullid or b == nullid:
             return nullid

         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

         ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(nodemap.__getitem__, ancs))
         return nullid

     def commonancestorsheads(self, a, b):
         """calculate all the heads of the common ancestors of nodes a and b"""

         if a == nullid or b == nullid:
             return nullid

         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

         ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
         return map(nodemap.__getitem__, ancs)

     def _buildrevgraph(self, a, b):
         """Builds a numeric revision graph for the given two nodes.
         Returns a node->rev map and a rev->[revs] parent function.
         """
         amap = self.ancestormap(a)
         bmap = self.ancestormap(b)

         # Union the two maps
         parentsmap = collections.defaultdict(list)
         allparents = set()
         for mapping in (amap, bmap):
             for node, pdata in mapping.iteritems():
                 parents = parentsmap[node]
                 p1, p2, linknode, copyfrom = pdata
                 # Don't follow renames (copyfrom).
                 # remotefilectx.ancestor does that.
                 if p1 != nullid and not copyfrom:
                     parents.append(p1)
                     allparents.add(p1)
                 if p2 != nullid:
                     parents.append(p2)
                     allparents.add(p2)

         # Breadth first traversal to build linkrev graph
         parentrevs = collections.defaultdict(list)
         revmap = {}
         queue = collections.deque(((None, n) for n in parentsmap
                                    if n not in allparents))
         while queue:
             prevrev, current = queue.pop()
             if current in revmap:
                 if prevrev:
                     parentrevs[prevrev].append(revmap[current])
                 continue

             # Assign linkrevs in reverse order, so start at
             # len(parentsmap) and work backwards.
             currentrev = len(parentsmap) - len(revmap) - 1
             revmap[current] = currentrev

             if prevrev:
                 parentrevs[prevrev].append(currentrev)

             for parent in parentsmap.get(current):
                 queue.appendleft((currentrev, parent))

         return revmap, parentrevs.__getitem__

     def strip(self, minlink, transaction):
         pass

     # misc unused things
     def files(self):
         return []

     def checksize(self):
         return 0, 0
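Because __init__ now copies the global table with dict(flagutil.flagprocessors), a processor added to one remotefilelog instance stays local to that instance. A hypothetical registration, following the (read, write, raw) triple shape the old _processflags unpacked; the flag value and hook names below are made up for illustration and are not part of Mercurial's API:

    # Hypothetical example: REVIDX_EXAMPLE and the hook functions are made-up
    # names, shown only to illustrate the (read, write, raw) triple shape.
    REVIDX_EXAMPLE = 1 << 14

    def examplereadhook(flog, text):
        return text, True   # (possibly transformed text, hash still valid)

    def examplewritehook(flog, text):
        return text, True

    def examplerawhook(flog, text):
        return True         # raw text validates against the stored hash

    # ``rfl`` would be a remotefilelog instance; the registration does not
    # leak into other stores because each instance copied the registry:
    #     rfl._flagprocessors[REVIDX_EXAMPLE] = (
    #         examplereadhook, examplewritehook, examplerawhook)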