##// END OF EJS Templates
remotefilelog: prune obsolete method...
Augie Fackler -
r40535:b6785410 default
parent child Browse files
Show More
@@ -1,481 +1,452 b''
1 # remotefilelog.py - filelog implementation where filelog history is stored
1 # remotefilelog.py - filelog implementation where filelog history is stored
2 # remotely
2 # remotely
3 #
3 #
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import os
11 import os
12
12
13 from mercurial.node import bin, nullid
13 from mercurial.node import bin, nullid
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import (
15 from mercurial import (
16 ancestor,
16 ancestor,
17 error,
17 error,
18 mdiff,
18 mdiff,
19 revlog,
19 revlog,
20 )
20 )
21 from mercurial.utils import storageutil
21 from mercurial.utils import storageutil
22
22
23 from . import (
23 from . import (
24 constants,
24 constants,
25 fileserverclient,
25 fileserverclient,
26 shallowutil,
26 shallowutil,
27 )
27 )
28
28
class remotefilelognodemap(object):
    """Minimal nodemap facade backed by a remotefilelog content store.

    Offers membership tests against the store instead of a revlog index.
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # The store reports which (filename, node) pairs are absent; the
        # node is present exactly when nothing comes back missing.
        return not self._store.getmissing([(self._filename, node)])

    def __get__(self, node):
        # identity lookup: a known node maps to itself
        if node not in self:
            raise KeyError(node)
        return node
42
42
class remotefilelog(object):
    """Filelog-style storage whose file history lives in remote stores
    (``repo.contentstore`` / ``repo.metadatastore``) rather than a local
    revlog."""

    # advertise generaldelta so callers that sniff this flag behave
    _generaldelta = True

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1
54
54
55 def read(self, node):
55 def read(self, node):
56 """returns the file contents at this node"""
56 """returns the file contents at this node"""
57 t = self.revision(node)
57 t = self.revision(node)
58 if not t.startswith('\1\n'):
58 if not t.startswith('\1\n'):
59 return t
59 return t
60 s = t.index('\1\n', 2)
60 s = t.index('\1\n', 2)
61 return t[s + 2:]
61 return t[s + 2:]
62
62
63 def add(self, text, meta, transaction, linknode, p1=None, p2=None):
63 def add(self, text, meta, transaction, linknode, p1=None, p2=None):
64 hashtext = text
64 hashtext = text
65
65
66 # hash with the metadata, like in vanilla filelogs
66 # hash with the metadata, like in vanilla filelogs
67 hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
67 hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
68 meta.get('copyrev'))
68 meta.get('copyrev'))
69 node = storageutil.hashrevisionsha1(hashtext, p1, p2)
69 node = storageutil.hashrevisionsha1(hashtext, p1, p2)
70 return self.addrevision(hashtext, transaction, linknode, p1, p2,
70 return self.addrevision(hashtext, transaction, linknode, p1, p2,
71 node=node)
71 node=node)
72
72
73 def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
73 def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
74 # text passed to "_createfileblob" does not include filelog metadata
74 # text passed to "_createfileblob" does not include filelog metadata
75 header = shallowutil.buildfileblobheader(len(text), flags)
75 header = shallowutil.buildfileblobheader(len(text), flags)
76 data = "%s\0%s" % (header, text)
76 data = "%s\0%s" % (header, text)
77
77
78 realp1 = p1
78 realp1 = p1
79 copyfrom = ""
79 copyfrom = ""
80 if meta and 'copy' in meta:
80 if meta and 'copy' in meta:
81 copyfrom = meta['copy']
81 copyfrom = meta['copy']
82 realp1 = bin(meta['copyrev'])
82 realp1 = bin(meta['copyrev'])
83
83
84 data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
84 data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
85
85
86 visited = set()
86 visited = set()
87
87
88 pancestors = {}
88 pancestors = {}
89 queue = []
89 queue = []
90 if realp1 != nullid:
90 if realp1 != nullid:
91 p1flog = self
91 p1flog = self
92 if copyfrom:
92 if copyfrom:
93 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
93 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
94
94
95 pancestors.update(p1flog.ancestormap(realp1))
95 pancestors.update(p1flog.ancestormap(realp1))
96 queue.append(realp1)
96 queue.append(realp1)
97 visited.add(realp1)
97 visited.add(realp1)
98 if p2 != nullid:
98 if p2 != nullid:
99 pancestors.update(self.ancestormap(p2))
99 pancestors.update(self.ancestormap(p2))
100 queue.append(p2)
100 queue.append(p2)
101 visited.add(p2)
101 visited.add(p2)
102
102
103 ancestortext = ""
103 ancestortext = ""
104
104
105 # add the ancestors in topological order
105 # add the ancestors in topological order
106 while queue:
106 while queue:
107 c = queue.pop(0)
107 c = queue.pop(0)
108 pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
108 pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
109
109
110 pacopyfrom = pacopyfrom or ''
110 pacopyfrom = pacopyfrom or ''
111 ancestortext += "%s%s%s%s%s\0" % (
111 ancestortext += "%s%s%s%s%s\0" % (
112 c, pa1, pa2, ancestorlinknode, pacopyfrom)
112 c, pa1, pa2, ancestorlinknode, pacopyfrom)
113
113
114 if pa1 != nullid and pa1 not in visited:
114 if pa1 != nullid and pa1 not in visited:
115 queue.append(pa1)
115 queue.append(pa1)
116 visited.add(pa1)
116 visited.add(pa1)
117 if pa2 != nullid and pa2 not in visited:
117 if pa2 != nullid and pa2 not in visited:
118 queue.append(pa2)
118 queue.append(pa2)
119 visited.add(pa2)
119 visited.add(pa2)
120
120
121 data += ancestortext
121 data += ancestortext
122
122
123 return data
123 return data
124
124
125 def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
125 def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
126 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
126 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
127 # text passed to "addrevision" includes hg filelog metadata header
127 # text passed to "addrevision" includes hg filelog metadata header
128 if node is None:
128 if node is None:
129 node = storageutil.hashrevisionsha1(text, p1, p2)
129 node = storageutil.hashrevisionsha1(text, p1, p2)
130
130
131 meta, metaoffset = storageutil.parsemeta(text)
131 meta, metaoffset = storageutil.parsemeta(text)
132 rawtext, validatehash = self._processflags(text, flags, 'write')
132 rawtext, validatehash = self._processflags(text, flags, 'write')
133 return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
133 return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
134 node, flags, cachedelta,
134 node, flags, cachedelta,
135 _metatuple=(meta, metaoffset))
135 _metatuple=(meta, metaoffset))
136
136
137 def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
137 def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
138 flags, cachedelta=None, _metatuple=None):
138 flags, cachedelta=None, _metatuple=None):
139 if _metatuple:
139 if _metatuple:
140 # _metatuple: used by "addrevision" internally by remotefilelog
140 # _metatuple: used by "addrevision" internally by remotefilelog
141 # meta was parsed confidently
141 # meta was parsed confidently
142 meta, metaoffset = _metatuple
142 meta, metaoffset = _metatuple
143 else:
143 else:
144 # not from self.addrevision, but something else (repo._filecommit)
144 # not from self.addrevision, but something else (repo._filecommit)
145 # calls addrawrevision directly. remotefilelog needs to get and
145 # calls addrawrevision directly. remotefilelog needs to get and
146 # strip filelog metadata.
146 # strip filelog metadata.
147 # we don't have confidence about whether rawtext contains filelog
147 # we don't have confidence about whether rawtext contains filelog
148 # metadata or not (flag processor could replace it), so we just
148 # metadata or not (flag processor could replace it), so we just
149 # parse it as best-effort.
149 # parse it as best-effort.
150 # in LFS (flags != 0)'s case, the best way is to call LFS code to
150 # in LFS (flags != 0)'s case, the best way is to call LFS code to
151 # get the meta information, instead of storageutil.parsemeta.
151 # get the meta information, instead of storageutil.parsemeta.
152 meta, metaoffset = storageutil.parsemeta(rawtext)
152 meta, metaoffset = storageutil.parsemeta(rawtext)
153 if flags != 0:
153 if flags != 0:
154 # when flags != 0, be conservative and do not mangle rawtext, since
154 # when flags != 0, be conservative and do not mangle rawtext, since
155 # a read flag processor expects the text not being mangled at all.
155 # a read flag processor expects the text not being mangled at all.
156 metaoffset = 0
156 metaoffset = 0
157 if metaoffset:
157 if metaoffset:
158 # remotefilelog fileblob stores copy metadata in its ancestortext,
158 # remotefilelog fileblob stores copy metadata in its ancestortext,
159 # not its main blob. so we need to remove filelog metadata
159 # not its main blob. so we need to remove filelog metadata
160 # (containing copy information) from text.
160 # (containing copy information) from text.
161 blobtext = rawtext[metaoffset:]
161 blobtext = rawtext[metaoffset:]
162 else:
162 else:
163 blobtext = rawtext
163 blobtext = rawtext
164 data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
164 data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
165 linknode)
165 linknode)
166 self.repo.contentstore.addremotefilelognode(self.filename, node, data)
166 self.repo.contentstore.addremotefilelognode(self.filename, node, data)
167
167
168 return node
168 return node
169
169
170 def renamed(self, node):
170 def renamed(self, node):
171 ancestors = self.repo.metadatastore.getancestors(self.filename, node)
171 ancestors = self.repo.metadatastore.getancestors(self.filename, node)
172 p1, p2, linknode, copyfrom = ancestors[node]
172 p1, p2, linknode, copyfrom = ancestors[node]
173 if copyfrom:
173 if copyfrom:
174 return (copyfrom, p1)
174 return (copyfrom, p1)
175
175
176 return False
176 return False
177
177
178 def size(self, node):
178 def size(self, node):
179 """return the size of a given revision"""
179 """return the size of a given revision"""
180 return len(self.read(node))
180 return len(self.read(node))
181
181
182 rawsize = size
182 rawsize = size
183
183
184 def cmp(self, node, text):
184 def cmp(self, node, text):
185 """compare text with a given file revision
185 """compare text with a given file revision
186
186
187 returns True if text is different than what is stored.
187 returns True if text is different than what is stored.
188 """
188 """
189
189
190 if node == nullid:
190 if node == nullid:
191 return True
191 return True
192
192
193 nodetext = self.read(node)
193 nodetext = self.read(node)
194 return nodetext != text
194 return nodetext != text
195
195
196 def __nonzero__(self):
196 def __nonzero__(self):
197 return True
197 return True
198
198
199 def __len__(self):
199 def __len__(self):
200 if self.filename == '.hgtags':
200 if self.filename == '.hgtags':
201 # The length of .hgtags is used to fast path tag checking.
201 # The length of .hgtags is used to fast path tag checking.
202 # remotefilelog doesn't support .hgtags since the entire .hgtags
202 # remotefilelog doesn't support .hgtags since the entire .hgtags
203 # history is needed. Use the excludepattern setting to make
203 # history is needed. Use the excludepattern setting to make
204 # .hgtags a normal filelog.
204 # .hgtags a normal filelog.
205 return 0
205 return 0
206
206
207 raise RuntimeError("len not supported")
207 raise RuntimeError("len not supported")
208
208
209 def empty(self):
209 def empty(self):
210 return False
210 return False
211
211
212 def flags(self, node):
212 def flags(self, node):
213 if isinstance(node, int):
213 if isinstance(node, int):
214 raise error.ProgrammingError(
214 raise error.ProgrammingError(
215 'remotefilelog does not accept integer rev for flags')
215 'remotefilelog does not accept integer rev for flags')
216 store = self.repo.contentstore
216 store = self.repo.contentstore
217 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
217 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
218
218
219 def parents(self, node):
219 def parents(self, node):
220 if node == nullid:
220 if node == nullid:
221 return nullid, nullid
221 return nullid, nullid
222
222
223 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
223 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
224 p1, p2, linknode, copyfrom = ancestormap[node]
224 p1, p2, linknode, copyfrom = ancestormap[node]
225 if copyfrom:
225 if copyfrom:
226 p1 = nullid
226 p1 = nullid
227
227
228 return p1, p2
228 return p1, p2
229
229
230 def parentrevs(self, rev):
230 def parentrevs(self, rev):
231 # TODO(augie): this is a node and should be a rev, but for now
231 # TODO(augie): this is a node and should be a rev, but for now
232 # nothing in core seems to actually break.
232 # nothing in core seems to actually break.
233 return self.parents(rev)
233 return self.parents(rev)
234
234
235 def linknode(self, node):
235 def linknode(self, node):
236 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
236 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
237 p1, p2, linknode, copyfrom = ancestormap[node]
237 p1, p2, linknode, copyfrom = ancestormap[node]
238 return linknode
238 return linknode
239
239
240 def linkrev(self, node):
240 def linkrev(self, node):
241 return self.repo.unfiltered().changelog.rev(self.linknode(node))
241 return self.repo.unfiltered().changelog.rev(self.linknode(node))
242
242
243 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
243 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
244 assumehaveparentrevisions=False, deltaprevious=False,
244 assumehaveparentrevisions=False, deltaprevious=False,
245 deltamode=None):
245 deltamode=None):
246 # we don't use any of these parameters here
246 # we don't use any of these parameters here
247 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
247 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
248 del deltamode
248 del deltamode
249 prevnode = None
249 prevnode = None
250 for node in nodes:
250 for node in nodes:
251 p1, p2 = self.parents(node)
251 p1, p2 = self.parents(node)
252 if prevnode is None:
252 if prevnode is None:
253 basenode = prevnode = p1
253 basenode = prevnode = p1
254 if basenode == node:
254 if basenode == node:
255 basenode = nullid
255 basenode = nullid
256 if basenode != nullid:
256 if basenode != nullid:
257 revision = None
257 revision = None
258 delta = self.revdiff(basenode, node)
258 delta = self.revdiff(basenode, node)
259 else:
259 else:
260 revision = self.revision(node, raw=True)
260 revision = self.revision(node, raw=True)
261 delta = None
261 delta = None
262 yield revlog.revlogrevisiondelta(
262 yield revlog.revlogrevisiondelta(
263 node=node,
263 node=node,
264 p1node=p1,
264 p1node=p1,
265 p2node=p2,
265 p2node=p2,
266 linknode=self.linknode(node),
266 linknode=self.linknode(node),
267 basenode=basenode,
267 basenode=basenode,
268 flags=self.flags(node),
268 flags=self.flags(node),
269 baserevisionsize=None,
269 baserevisionsize=None,
270 revision=revision,
270 revision=revision,
271 delta=delta,
271 delta=delta,
272 )
272 )
273
273
274 def emitrevisiondeltas(self, requests):
275 prevnode = None
276 for request in requests:
277 node = request.node
278 p1, p2 = self.parents(node)
279 if prevnode is None:
280 prevnode = p1
281 if request.basenode is not None:
282 basenode = request.basenode
283 else:
284 basenode = p1
285 if basenode == nullid:
286 revision = self.revision(node, raw=True)
287 delta = None
288 else:
289 revision = None
290 delta = self.revdiff(basenode, node)
291 yield revlog.revlogrevisiondelta(
292 node=node,
293 p1node=p1,
294 p2node=p2,
295 linknode=self.linknode(node),
296 basenode=basenode,
297 flags=self.flags(node),
298 baserevisionsize=None,
299 revision=revision,
300 delta=delta,
301 )
302
303 def revdiff(self, node1, node2):
274 def revdiff(self, node1, node2):
304 return mdiff.textdiff(self.revision(node1, raw=True),
275 return mdiff.textdiff(self.revision(node1, raw=True),
305 self.revision(node2, raw=True))
276 self.revision(node2, raw=True))
306
277
307 def lookup(self, node):
278 def lookup(self, node):
308 if len(node) == 40:
279 if len(node) == 40:
309 node = bin(node)
280 node = bin(node)
310 if len(node) != 20:
281 if len(node) != 20:
311 raise error.LookupError(node, self.filename,
282 raise error.LookupError(node, self.filename,
312 _('invalid lookup input'))
283 _('invalid lookup input'))
313
284
314 return node
285 return node
315
286
316 def rev(self, node):
287 def rev(self, node):
317 # This is a hack to make TortoiseHG work.
288 # This is a hack to make TortoiseHG work.
318 return node
289 return node
319
290
320 def node(self, rev):
291 def node(self, rev):
321 # This is a hack.
292 # This is a hack.
322 if isinstance(rev, int):
293 if isinstance(rev, int):
323 raise error.ProgrammingError(
294 raise error.ProgrammingError(
324 'remotefilelog does not convert integer rev to node')
295 'remotefilelog does not convert integer rev to node')
325 return rev
296 return rev
326
297
327 def revision(self, node, raw=False):
298 def revision(self, node, raw=False):
328 """returns the revlog contents at this node.
299 """returns the revlog contents at this node.
329 this includes the meta data traditionally included in file revlogs.
300 this includes the meta data traditionally included in file revlogs.
330 this is generally only used for bundling and communicating with vanilla
301 this is generally only used for bundling and communicating with vanilla
331 hg clients.
302 hg clients.
332 """
303 """
333 if node == nullid:
304 if node == nullid:
334 return ""
305 return ""
335 if len(node) != 20:
306 if len(node) != 20:
336 raise error.LookupError(node, self.filename,
307 raise error.LookupError(node, self.filename,
337 _('invalid revision input'))
308 _('invalid revision input'))
338
309
339 store = self.repo.contentstore
310 store = self.repo.contentstore
340 rawtext = store.get(self.filename, node)
311 rawtext = store.get(self.filename, node)
341 if raw:
312 if raw:
342 return rawtext
313 return rawtext
343 flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
314 flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
344 if flags == 0:
315 if flags == 0:
345 return rawtext
316 return rawtext
346 text, verifyhash = self._processflags(rawtext, flags, 'read')
317 text, verifyhash = self._processflags(rawtext, flags, 'read')
347 return text
318 return text
348
319
349 def _processflags(self, text, flags, operation, raw=False):
320 def _processflags(self, text, flags, operation, raw=False):
350 # mostly copied from hg/mercurial/revlog.py
321 # mostly copied from hg/mercurial/revlog.py
351 validatehash = True
322 validatehash = True
352 orderedflags = revlog.REVIDX_FLAGS_ORDER
323 orderedflags = revlog.REVIDX_FLAGS_ORDER
353 if operation == 'write':
324 if operation == 'write':
354 orderedflags = reversed(orderedflags)
325 orderedflags = reversed(orderedflags)
355 for flag in orderedflags:
326 for flag in orderedflags:
356 if flag & flags:
327 if flag & flags:
357 vhash = True
328 vhash = True
358 if flag not in revlog._flagprocessors:
329 if flag not in revlog._flagprocessors:
359 message = _("missing processor for flag '%#x'") % (flag)
330 message = _("missing processor for flag '%#x'") % (flag)
360 raise revlog.RevlogError(message)
331 raise revlog.RevlogError(message)
361 readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
332 readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
362 if raw:
333 if raw:
363 vhash = rawfunc(self, text)
334 vhash = rawfunc(self, text)
364 elif operation == 'read':
335 elif operation == 'read':
365 text, vhash = readfunc(self, text)
336 text, vhash = readfunc(self, text)
366 elif operation == 'write':
337 elif operation == 'write':
367 text, vhash = writefunc(self, text)
338 text, vhash = writefunc(self, text)
368 validatehash = validatehash and vhash
339 validatehash = validatehash and vhash
369 return text, validatehash
340 return text, validatehash
370
341
371 def _read(self, id):
342 def _read(self, id):
372 """reads the raw file blob from disk, cache, or server"""
343 """reads the raw file blob from disk, cache, or server"""
373 fileservice = self.repo.fileservice
344 fileservice = self.repo.fileservice
374 localcache = fileservice.localcache
345 localcache = fileservice.localcache
375 cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
346 cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
376 id)
347 id)
377 try:
348 try:
378 return localcache.read(cachekey)
349 return localcache.read(cachekey)
379 except KeyError:
350 except KeyError:
380 pass
351 pass
381
352
382 localkey = fileserverclient.getlocalkey(self.filename, id)
353 localkey = fileserverclient.getlocalkey(self.filename, id)
383 localpath = os.path.join(self.localpath, localkey)
354 localpath = os.path.join(self.localpath, localkey)
384 try:
355 try:
385 return shallowutil.readfile(localpath)
356 return shallowutil.readfile(localpath)
386 except IOError:
357 except IOError:
387 pass
358 pass
388
359
389 fileservice.prefetch([(self.filename, id)])
360 fileservice.prefetch([(self.filename, id)])
390 try:
361 try:
391 return localcache.read(cachekey)
362 return localcache.read(cachekey)
392 except KeyError:
363 except KeyError:
393 pass
364 pass
394
365
395 raise error.LookupError(id, self.filename, _('no node'))
366 raise error.LookupError(id, self.filename, _('no node'))
396
367
397 def ancestormap(self, node):
368 def ancestormap(self, node):
398 return self.repo.metadatastore.getancestors(self.filename, node)
369 return self.repo.metadatastore.getancestors(self.filename, node)
399
370
400 def ancestor(self, a, b):
371 def ancestor(self, a, b):
401 if a == nullid or b == nullid:
372 if a == nullid or b == nullid:
402 return nullid
373 return nullid
403
374
404 revmap, parentfunc = self._buildrevgraph(a, b)
375 revmap, parentfunc = self._buildrevgraph(a, b)
405 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
376 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
406
377
407 ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
378 ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
408 if ancs:
379 if ancs:
409 # choose a consistent winner when there's a tie
380 # choose a consistent winner when there's a tie
410 return min(map(nodemap.__getitem__, ancs))
381 return min(map(nodemap.__getitem__, ancs))
411 return nullid
382 return nullid
412
383
413 def commonancestorsheads(self, a, b):
384 def commonancestorsheads(self, a, b):
414 """calculate all the heads of the common ancestors of nodes a and b"""
385 """calculate all the heads of the common ancestors of nodes a and b"""
415
386
416 if a == nullid or b == nullid:
387 if a == nullid or b == nullid:
417 return nullid
388 return nullid
418
389
419 revmap, parentfunc = self._buildrevgraph(a, b)
390 revmap, parentfunc = self._buildrevgraph(a, b)
420 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
391 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
421
392
422 ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
393 ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
423 return map(nodemap.__getitem__, ancs)
394 return map(nodemap.__getitem__, ancs)
424
395
425 def _buildrevgraph(self, a, b):
396 def _buildrevgraph(self, a, b):
426 """Builds a numeric revision graph for the given two nodes.
397 """Builds a numeric revision graph for the given two nodes.
427 Returns a node->rev map and a rev->[revs] parent function.
398 Returns a node->rev map and a rev->[revs] parent function.
428 """
399 """
429 amap = self.ancestormap(a)
400 amap = self.ancestormap(a)
430 bmap = self.ancestormap(b)
401 bmap = self.ancestormap(b)
431
402
432 # Union the two maps
403 # Union the two maps
433 parentsmap = collections.defaultdict(list)
404 parentsmap = collections.defaultdict(list)
434 allparents = set()
405 allparents = set()
435 for mapping in (amap, bmap):
406 for mapping in (amap, bmap):
436 for node, pdata in mapping.iteritems():
407 for node, pdata in mapping.iteritems():
437 parents = parentsmap[node]
408 parents = parentsmap[node]
438 p1, p2, linknode, copyfrom = pdata
409 p1, p2, linknode, copyfrom = pdata
439 # Don't follow renames (copyfrom).
410 # Don't follow renames (copyfrom).
440 # remotefilectx.ancestor does that.
411 # remotefilectx.ancestor does that.
441 if p1 != nullid and not copyfrom:
412 if p1 != nullid and not copyfrom:
442 parents.append(p1)
413 parents.append(p1)
443 allparents.add(p1)
414 allparents.add(p1)
444 if p2 != nullid:
415 if p2 != nullid:
445 parents.append(p2)
416 parents.append(p2)
446 allparents.add(p2)
417 allparents.add(p2)
447
418
448 # Breadth first traversal to build linkrev graph
419 # Breadth first traversal to build linkrev graph
449 parentrevs = collections.defaultdict(list)
420 parentrevs = collections.defaultdict(list)
450 revmap = {}
421 revmap = {}
451 queue = collections.deque(((None, n) for n in parentsmap.iterkeys()
422 queue = collections.deque(((None, n) for n in parentsmap.iterkeys()
452 if n not in allparents))
423 if n not in allparents))
453 while queue:
424 while queue:
454 prevrev, current = queue.pop()
425 prevrev, current = queue.pop()
455 if current in revmap:
426 if current in revmap:
456 if prevrev:
427 if prevrev:
457 parentrevs[prevrev].append(revmap[current])
428 parentrevs[prevrev].append(revmap[current])
458 continue
429 continue
459
430
460 # Assign linkrevs in reverse order, so start at
431 # Assign linkrevs in reverse order, so start at
461 # len(parentsmap) and work backwards.
432 # len(parentsmap) and work backwards.
462 currentrev = len(parentsmap) - len(revmap) - 1
433 currentrev = len(parentsmap) - len(revmap) - 1
463 revmap[current] = currentrev
434 revmap[current] = currentrev
464
435
465 if prevrev:
436 if prevrev:
466 parentrevs[prevrev].append(currentrev)
437 parentrevs[prevrev].append(currentrev)
467
438
468 for parent in parentsmap.get(current):
439 for parent in parentsmap.get(current):
469 queue.appendleft((currentrev, parent))
440 queue.appendleft((currentrev, parent))
470
441
471 return revmap, parentrevs.__getitem__
442 return revmap, parentrevs.__getitem__
472
443
473 def strip(self, minlink, transaction):
444 def strip(self, minlink, transaction):
474 pass
445 pass
475
446
476 # misc unused things
447 # misc unused things
477 def files(self):
448 def files(self):
478 return []
449 return []
479
450
480 def checksize(self):
451 def checksize(self):
481 return 0, 0
452 return 0, 0
General Comments 0
You need to be logged in to leave comments. Login now