##// END OF EJS Templates
flagprocessors: use _processflagsread in simple cases...
Author: marmoute
r43146:a3665eed default
parent child Browse files
Show More
@@ -1,443 +1,443 b''
1 # remotefilelog.py - filelog implementation where filelog history is stored
1 # remotefilelog.py - filelog implementation where filelog history is stored
2 # remotely
2 # remotely
3 #
3 #
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import os
11 import os
12
12
13 from mercurial.node import (
13 from mercurial.node import (
14 bin,
14 bin,
15 nullid,
15 nullid,
16 wdirfilenodeids,
16 wdirfilenodeids,
17 wdirid,
17 wdirid,
18 )
18 )
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20 from mercurial import (
20 from mercurial import (
21 ancestor,
21 ancestor,
22 error,
22 error,
23 mdiff,
23 mdiff,
24 revlog,
24 revlog,
25 )
25 )
26 from mercurial.utils import storageutil
26 from mercurial.utils import storageutil
27 from mercurial.revlogutils import flagutil
27 from mercurial.revlogutils import flagutil
28
28
29 from . import (
29 from . import (
30 constants,
30 constants,
31 fileserverclient,
31 fileserverclient,
32 shallowutil,
32 shallowutil,
33 )
33 )
34
34
class remotefilelognodemap(object):
    """Minimal nodemap-like view over a remotefilelog content store.

    Membership is delegated to the store: a node is considered present
    when the store does not report the (filename, node) pair as missing.
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # present iff the store reports nothing missing for this pair
        missing = self._store.getmissing([(self._filename, node)])
        return not bool(missing)

    def __get__(self, node):
        # NOTE(review): named like the descriptor protocol but used here as
        # a plain checked lookup that returns the node itself — confirm
        # against callers before renaming.
        if node not in self:
            raise KeyError(node)
        return node
48
48
class remotefilelog(flagutil.flagprocessorsmixin):
    """Filelog implementation whose revision data is stored remotely.

    Revision text comes from ``repo.contentstore`` and ancestry/copy
    metadata from ``repo.metadatastore`` (with the fileservice used to
    prefetch on miss) instead of a local revlog.
    """

    # the remote stores behave as if generaldelta were enabled
    _generaldelta = True

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1

        # per-instance copy so flag processors can be adjusted locally
        self._flagprocessors = dict(flagutil.flagprocessors)

    def read(self, node):
        """returns the file contents at this node"""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        # strip the filelog copy-metadata header ('\1\n...\1\n')
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        """Add a revision, hashing text together with its copy metadata."""
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
                                                meta.get('copyrev'))
        node = storageutil.hashrevisionsha1(hashtext, p1, p2)
        return self.addrevision(hashtext, transaction, linknode, p1, p2,
                                node=node)

    def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
        """Serialize a revision and its full ancestry into a fileblob.

        Layout: header, '\\0', text, then one '\\0'-terminated ancestry
        record per node (node, p1, p2, linknode, copyfrom) in topological
        order starting with this revision.
        """
        # text passed to "_createfileblob" does not include filelog metadata
        header = shallowutil.buildfileblobheader(len(text), flags)
        data = "%s\0%s" % (header, text)

        realp1 = p1
        copyfrom = ""
        if meta and 'copy' in meta:
            # a copy: the real first parent lives in the source filelog
            copyfrom = meta['copy']
            realp1 = bin(meta['copyrev'])

        data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

        visited = set()

        pancestors = {}
        queue = []
        if realp1 != nullid:
            p1flog = self
            if copyfrom:
                # follow the rename into the source file's log
                p1flog = remotefilelog(self.opener, copyfrom, self.repo)

            pancestors.update(p1flog.ancestormap(realp1))
            queue.append(realp1)
            visited.add(realp1)
        if p2 != nullid:
            pancestors.update(self.ancestormap(p2))
            queue.append(p2)
            visited.add(p2)

        ancestortext = ""

        # add the ancestors in topological order
        while queue:
            c = queue.pop(0)
            pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

            pacopyfrom = pacopyfrom or ''
            ancestortext += "%s%s%s%s%s\0" % (
                c, pa1, pa2, ancestorlinknode, pacopyfrom)

            if pa1 != nullid and pa1 not in visited:
                queue.append(pa1)
                visited.add(pa1)
            if pa2 != nullid and pa2 not in visited:
                queue.append(pa2)
                visited.add(pa2)

        data += ancestortext

        return data

    def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
        """Add a revision, running write flag processors on the text."""
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = self._processflagswrite(text, flags)
        return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
                                   node, flags, cachedelta,
                                   _metatuple=(meta, metaoffset))

    def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        """Store an already-flag-processed revision as a fileblob."""
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext
        data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
                                    linknode)
        self.repo.contentstore.addremotefilelognode(self.filename, node, data)

        return node

    def renamed(self, node):
        """Return (copysource, copyrev) if node is a copy, else False."""
        ancestors = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestors[node]
        if copyfrom:
            return (copyfrom, p1)

        return False

    def size(self, node):
        """return the size of a given revision"""
        return len(self.read(node))

    # no separate raw storage size is tracked; alias to size
    rawsize = size

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        if node == nullid:
            return True

        nodetext = self.read(node)
        return nodetext != text

    def __nonzero__(self):
        # py2 truthiness: a remotefilelog is always "non-empty"
        return True

    __bool__ = __nonzero__

    def __len__(self):
        if self.filename == '.hgtags':
            # The length of .hgtags is used to fast path tag checking.
            # remotefilelog doesn't support .hgtags since the entire .hgtags
            # history is needed. Use the excludepattern setting to make
            # .hgtags a normal filelog.
            return 0

        raise RuntimeError("len not supported")

    def empty(self):
        return False

    def flags(self, node):
        """Return the revlog flags stored in the content store metadata."""
        if isinstance(node, int):
            raise error.ProgrammingError(
                'remotefilelog does not accept integer rev for flags')
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        """Return (p1, p2), masking p1 to nullid for copies (see renamed)."""
        if node == nullid:
            return nullid, nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            p1 = nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        """Return the changelog node this file revision is linked to."""
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        """Return the (unfiltered) changelog revision for node's linknode."""
        return self.repo.unfiltered().changelog.rev(self.linknode(node))

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False, deltaprevious=False,
                      deltamode=None):
        """Emit revlogrevisiondelta objects for the given nodes.

        Deltas are computed against p1 when possible, otherwise the full
        raw text is emitted.
        """
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                basenode = prevnode = p1
            if basenode == node:
                basenode = nullid
            if basenode != nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
                )

    def revdiff(self, node1, node2):
        """Return a text diff between the raw data of two nodes."""
        return mdiff.textdiff(self.rawdata(node1),
                              self.rawdata(node2))

    def lookup(self, node):
        """Normalize a hex or binary node id to binary; validate length."""
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(node, self.filename,
                                    _('invalid lookup input'))

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                'remotefilelog does not convert integer rev to node')
        return rev

    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.
        """
        if node == nullid:
            return ""
        if len(node) != 20:
            raise error.LookupError(node, self.filename,
                                    _('invalid revision input'))
        if node == wdirid or node in wdirfilenodeids:
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        # fast path above skips processors when no flags are set
        text, verifyhash = self._processflagsread(rawtext, flags)
        return text

    def rawdata(self, node):
        # NOTE(review): raw=False looks surprising for a "rawdata" accessor —
        # confirm this matches the intended storage interface semantics.
        return self.revision(node, raw=False)

    def _read(self, id):
        """reads the raw file blob from disk, cache, or server"""
        fileservice = self.repo.fileservice
        localcache = fileservice.localcache
        cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
                                                id)
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        # NOTE(review): self.localpath is never assigned in __init__ —
        # confirm it is provided elsewhere before relying on this path.
        localkey = fileserverclient.getlocalkey(self.filename, id)
        localpath = os.path.join(self.localpath, localkey)
        try:
            return shallowutil.readfile(localpath)
        except IOError:
            pass

        # last resort: fetch from the server, then retry the cache
        fileservice.prefetch([(self.filename, id)])
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        raise error.LookupError(id, self.filename, _('no node'))

    def ancestormap(self, node):
        """Return {node: (p1, p2, linknode, copyfrom)} for node's history."""
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        """Return the greatest common ancestor node of a and b (or nullid)."""
        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""

        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in mapping.iteritems():
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        queue = collections.deque(((None, n) for n in parentsmap
                                   if n not in allparents))
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__

    def strip(self, minlink, transaction):
        # stripping is a no-op: remote stores are append-only
        pass

    # misc unused things
    def files(self):
        return []

    def checksize(self):
        return 0, 0
General Comments 0
You need to be logged in to leave comments. Login now