##// END OF EJS Templates
remotefilelog: drop dead code — remove the unused `_read` method and the now-unneeded `os` and `fileserverclient` imports
marmoute -
r52196:e5b710ce default
parent child Browse files
Show More
@@ -1,502 +1,473 b''
1 1 # remotefilelog.py - filelog implementation where filelog history is stored
2 2 # remotely
3 3 #
4 4 # Copyright 2013 Facebook, Inc.
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import collections
10 import os
11 10
12 11 from mercurial.node import bin
13 12 from mercurial.i18n import _
14 13 from mercurial import (
15 14 ancestor,
16 15 error,
17 16 mdiff,
18 17 revlog,
19 18 )
20 19 from mercurial.utils import storageutil
21 20 from mercurial.revlogutils import flagutil
22 21
23 22 from . import (
24 23 constants,
25 fileserverclient,
26 24 shallowutil,
27 25 )
28 26
29 27
class remotefilelognodemap:
    """Node-membership view over a content store for a single file.

    Exposes ``node in nodemap`` by asking the backing store whether the
    (filename, node) pair is missing.
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # Present exactly when the store reports nothing missing for it.
        return not self._store.getmissing([(self._filename, node)])

    def __get__(self, node):
        if node in self:
            return node
        raise KeyError(node)
43 41
44 42
class remotefilelog:
    """Filelog implementation whose history lives in remote/shallow stores.

    Revision text comes from ``repo.contentstore`` and ancestry/copy
    metadata from ``repo.metadatastore`` instead of a local revlog.  Several
    rev-oriented entry points (``rev``, ``node``, ``parentrevs``) actually
    operate on nodes, because remotefilelog has no local revision numbering.

    Note: the dead ``_read`` helper (which depended on ``os`` and
    ``fileserverclient``) has been removed; nothing called it.
    """

    _flagserrorclass = error.RevlogError

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1

        # Per-instance copy so registered flag processors can be adjusted
        # without mutating the global table.
        self._flagprocessors = dict(flagutil.flagprocessors)

    def read(self, node):
        """returns the file contents at this node"""
        t = self.revision(node)
        if not t.startswith(b'\1\n'):
            return t
        # Strip the filelog metadata header (b'\1\n'...b'\1\n').
        s = t.index(b'\1\n', 2)
        return t[s + 2 :]

    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        """Add a revision, hashing text together with copy metadata.

        Returns the node of the new revision.
        """
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(
            text, meta.get(b'copy'), meta.get(b'copyrev')
        )
        node = storageutil.hashrevisionsha1(hashtext, p1, p2)
        return self.addrevision(
            hashtext, transaction, linknode, p1, p2, node=node
        )

    def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
        """Serialize a revision into the remotefilelog blob format:
        header, raw text, then the ancestor list in topological order.
        """
        # text passed to "_createfileblob" does not include filelog metadata
        header = shallowutil.buildfileblobheader(len(text), flags)
        data = b"%s\0%s" % (header, text)

        realp1 = p1
        copyfrom = b""
        if meta and b'copy' in meta:
            # A copy: record the source path and use the source revision
            # as the effective first parent.
            copyfrom = meta[b'copy']
            realp1 = bin(meta[b'copyrev'])

        data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

        visited = set()

        pancestors = {}
        queue = []
        if realp1 != self.repo.nullid:
            p1flog = self
            if copyfrom:
                # Ancestors of a copy come from the source file's log.
                p1flog = remotefilelog(self.opener, copyfrom, self.repo)

            pancestors.update(p1flog.ancestormap(realp1))
            queue.append(realp1)
            visited.add(realp1)
        if p2 != self.repo.nullid:
            pancestors.update(self.ancestormap(p2))
            queue.append(p2)
            visited.add(p2)

        ancestortext = b""

        # add the ancestors in topological order
        while queue:
            c = queue.pop(0)
            pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

            pacopyfrom = pacopyfrom or b''
            ancestortext += b"%s%s%s%s%s\0" % (
                c,
                pa1,
                pa2,
                ancestorlinknode,
                pacopyfrom,
            )

            if pa1 != self.repo.nullid and pa1 not in visited:
                queue.append(pa1)
                visited.add(pa1)
            if pa2 != self.repo.nullid and pa2 not in visited:
                queue.append(pa2)
                visited.add(pa2)

        data += ancestortext

        return data

    def addrevision(
        self,
        text,
        transaction,
        linknode,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        sidedata=None,
    ):
        """Add a revision whose ``text`` includes the hg filelog metadata
        header; flag processors are applied before storage.
        """
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = flagutil.processflagswrite(
            self,
            text,
            flags,
        )
        return self.addrawrevision(
            rawtext,
            transaction,
            linknode,
            p1,
            p2,
            node,
            flags,
            cachedelta,
            _metatuple=(meta, metaoffset),
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        """Store an already flag-processed revision.

        ``_metatuple`` is a private fast path used by :meth:`addrevision`
        when the filelog metadata was already parsed.
        """
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext
        data = self._createfileblob(
            blobtext, meta, flags, p1, p2, node, linknode
        )
        self.repo.contentstore.addremotefilelognode(self.filename, node, data)

        return node

    def renamed(self, node):
        """Return (copysource, copynode) if this node is a copy, else False."""
        ancestors = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestors[node]
        if copyfrom:
            return (copyfrom, p1)

        return False

    def size(self, node):
        """return the size of a given revision"""
        return len(self.read(node))

    rawsize = size

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        if node == self.repo.nullid:
            return True

        nodetext = self.read(node)
        return nodetext != text

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        if self.filename in (b'.hgtags', b'.hgsub', b'.hgsubstate'):
            # Global tag and subrepository support require access to the
            # file history for various performance sensitive operations.
            # excludepattern should be used for repositories depending on
            # those features to fallback to regular filelog.
            return 0

        raise RuntimeError(b"len not supported")

    def heads(self):
        # Fake heads of the filelog to satisfy hgweb.
        return []

    def empty(self):
        return False

    def flags(self, node):
        """Return the storage flags recorded for ``node`` (0 by default)."""
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        """Return the (p1, p2) nodes; copies report nullid as p1."""
        if node == self.repo.nullid:
            return self.repo.nullid, self.repo.nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            p1 = self.repo.nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        """Return the changelog node this file revision is linked to."""
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        return self.repo.unfiltered().changelog.rev(self.linknode(node))

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltaprevious=False,
        deltamode=None,
        sidedata_helpers=None,
        debug_info=None,
    ):
        """Yield ``revlogrevisiondelta`` objects for ``nodes``.

        NOTE(review): ``prevnode`` is only assigned on the first iteration,
        so the delta base for every later node stays the first node's p1 —
        confirm this matches the caller's expectations.
        """
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                basenode = prevnode = p1
            if basenode == node:
                basenode = self.repo.nullid
            if basenode != self.repo.nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
                # Sidedata is not supported yet
                sidedata=None,
                # Protocol flags are not used yet
                protocol_flags=0,
            )

    def revdiff(self, node1, node2):
        """Return a text delta from node1's data to node2's data."""
        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))

    def lookup(self, node):
        """Normalize a 40-char hex or 20-byte binary node to binary form."""
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid lookup input')
            )

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev

    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.
        """
        if node == self.repo.nullid:
            return b""
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid revision input')
            )
        if (
            node == self.repo.nodeconstants.wdirid
            or node in self.repo.nodeconstants.wdirfilenodeids
        ):
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        return flagutil.processflagsread(self, rawtext, flags)[0]

    def rawdata(self, node):
        # NOTE(review): despite the name, this passes raw=False, so flag
        # processors are applied — confirm this is intentional.
        return self.revision(node, raw=False)

    def ancestormap(self, node):
        """Return {node: (p1, p2, linknode, copyfrom)} for node's history."""
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        """Return the greatest common ancestor node of a and b (or nullid)."""
        if a == self.repo.nullid or b == self.repo.nullid:
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in revmap.items()}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return self.repo.nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""

        if a == self.repo.nullid or b == self.repo.nullid:
            # NOTE(review): returns a single node here but a map of nodes
            # below — callers appear to tolerate both; confirm.
            return self.repo.nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in revmap.items()}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in mapping.items():
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != self.repo.nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != self.repo.nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        queue = collections.deque(
            ((None, n) for n in parentsmap if n not in allparents)
        )
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__

    def strip(self, minlink, transaction):
        # History is stored remotely; stripping is a no-op here.
        pass

    # misc unused things
    def files(self):
        return []

    def checksize(self):
        return 0, 0
General Comments 0
You need to be logged in to leave comments. Login now