remotefilelog: remove deprecated API...
Raphaël Gomès
r49358:bf5dc156 default
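The deprecated `_processflags(...)` entry point removed below was only a thin dispatcher over the specialized flag-processor helpers in mercurial.revlogutils.flagutil, which the remaining code already calls directly. A minimal sketch of the equivalent dispatch, assuming `store` is a remotefilelog instance (or any revlog-like object with registered flag processors); the helper name process_flags is illustrative, not part of Mercurial:

    from mercurial.revlogutils import flagutil

    def process_flags(store, text, flags, operation, raw=False):
        # mirrors the removed _processflags(...) dispatcher
        if raw:
            # raw access: only validate, never transform the stored text
            return text, flagutil.processflagsraw(store, text, flags)
        elif operation == b'read':
            # read path: turn stored text into the logical revision text
            return flagutil.processflagsread(store, text, flags)
        else:  # write path: turn logical text into what gets stored
            return flagutil.processflagswrite(store, text, flags)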
@@ -1,516 +1,504 @@
1 1 # remotefilelog.py - filelog implementation where filelog history is stored
2 2 # remotely
3 3 #
4 4 # Copyright 2013 Facebook, Inc.
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import os
12 12
13 13 from mercurial.node import bin
14 14 from mercurial.i18n import _
15 15 from mercurial import (
16 16 ancestor,
17 17 error,
18 18 mdiff,
19 19 pycompat,
20 20 revlog,
21 util,
22 21 )
23 22 from mercurial.utils import storageutil
24 23 from mercurial.revlogutils import flagutil
25 24
26 25 from . import (
27 26 constants,
28 27 fileserverclient,
29 28 shallowutil,
30 29 )
31 30
32 31
33 32 class remotefilelognodemap(object):
34 33 def __init__(self, filename, store):
35 34 self._filename = filename
36 35 self._store = store
37 36
38 37 def __contains__(self, node):
39 38 missing = self._store.getmissing([(self._filename, node)])
40 39 return not bool(missing)
41 40
42 41 def __get__(self, node):
43 42 if node not in self:
44 43 raise KeyError(node)
45 44 return node
46 45
47 46
48 47 class remotefilelog(object):
49 48
50 49 _generaldelta = True
51 50 _flagserrorclass = error.RevlogError
52 51
53 52 def __init__(self, opener, path, repo):
54 53 self.opener = opener
55 54 self.filename = path
56 55 self.repo = repo
57 56 self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)
58 57
59 58 self.version = 1
60 59
61 60 self._flagprocessors = dict(flagutil.flagprocessors)
62 61
63 62 def read(self, node):
64 63 """returns the file contents at this node"""
65 64 t = self.revision(node)
66 65 if not t.startswith(b'\1\n'):
67 66 return t
68 67 s = t.index(b'\1\n', 2)
69 68 return t[s + 2 :]
70 69
71 70 def add(self, text, meta, transaction, linknode, p1=None, p2=None):
72 71 # hash with the metadata, like in vanilla filelogs
73 72 hashtext = shallowutil.createrevlogtext(
74 73 text, meta.get(b'copy'), meta.get(b'copyrev')
75 74 )
76 75 node = storageutil.hashrevisionsha1(hashtext, p1, p2)
77 76 return self.addrevision(
78 77 hashtext, transaction, linknode, p1, p2, node=node
79 78 )
80 79
81 80 def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
82 81 # text passed to "_createfileblob" does not include filelog metadata
83 82 header = shallowutil.buildfileblobheader(len(text), flags)
84 83 data = b"%s\0%s" % (header, text)
85 84
86 85 realp1 = p1
87 86 copyfrom = b""
88 87 if meta and b'copy' in meta:
89 88 copyfrom = meta[b'copy']
90 89 realp1 = bin(meta[b'copyrev'])
91 90
92 91 data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
93 92
94 93 visited = set()
95 94
96 95 pancestors = {}
97 96 queue = []
98 97 if realp1 != self.repo.nullid:
99 98 p1flog = self
100 99 if copyfrom:
101 100 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
102 101
103 102 pancestors.update(p1flog.ancestormap(realp1))
104 103 queue.append(realp1)
105 104 visited.add(realp1)
106 105 if p2 != self.repo.nullid:
107 106 pancestors.update(self.ancestormap(p2))
108 107 queue.append(p2)
109 108 visited.add(p2)
110 109
111 110 ancestortext = b""
112 111
113 112 # add the ancestors in topological order
114 113 while queue:
115 114 c = queue.pop(0)
116 115 pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
117 116
118 117 pacopyfrom = pacopyfrom or b''
119 118 ancestortext += b"%s%s%s%s%s\0" % (
120 119 c,
121 120 pa1,
122 121 pa2,
123 122 ancestorlinknode,
124 123 pacopyfrom,
125 124 )
126 125
127 126 if pa1 != self.repo.nullid and pa1 not in visited:
128 127 queue.append(pa1)
129 128 visited.add(pa1)
130 129 if pa2 != self.repo.nullid and pa2 not in visited:
131 130 queue.append(pa2)
132 131 visited.add(pa2)
133 132
134 133 data += ancestortext
135 134
136 135 return data
137 136
138 137 def addrevision(
139 138 self,
140 139 text,
141 140 transaction,
142 141 linknode,
143 142 p1,
144 143 p2,
145 144 cachedelta=None,
146 145 node=None,
147 146 flags=revlog.REVIDX_DEFAULT_FLAGS,
148 147 sidedata=None,
149 148 ):
150 149 # text passed to "addrevision" includes hg filelog metadata header
151 150 if node is None:
152 151 node = storageutil.hashrevisionsha1(text, p1, p2)
153 152
154 153 meta, metaoffset = storageutil.parsemeta(text)
155 154 rawtext, validatehash = flagutil.processflagswrite(
156 155 self,
157 156 text,
158 157 flags,
159 158 )
160 159 return self.addrawrevision(
161 160 rawtext,
162 161 transaction,
163 162 linknode,
164 163 p1,
165 164 p2,
166 165 node,
167 166 flags,
168 167 cachedelta,
169 168 _metatuple=(meta, metaoffset),
170 169 )
171 170
172 171 def addrawrevision(
173 172 self,
174 173 rawtext,
175 174 transaction,
176 175 linknode,
177 176 p1,
178 177 p2,
179 178 node,
180 179 flags,
181 180 cachedelta=None,
182 181 _metatuple=None,
183 182 ):
184 183 if _metatuple:
185 184 # _metatuple: used by "addrevision" internally by remotefilelog
186 185 # meta was parsed confidently
187 186 meta, metaoffset = _metatuple
188 187 else:
189 188 # not from self.addrevision, but something else (repo._filecommit)
190 189 # calls addrawrevision directly. remotefilelog needs to get and
191 190 # strip filelog metadata.
192 191 # we don't have confidence about whether rawtext contains filelog
193 192 # metadata or not (flag processor could replace it), so we just
194 193 # parse it as best-effort.
195 194 # in LFS (flags != 0)'s case, the best way is to call LFS code to
196 195 # get the meta information, instead of storageutil.parsemeta.
197 196 meta, metaoffset = storageutil.parsemeta(rawtext)
198 197 if flags != 0:
199 198 # when flags != 0, be conservative and do not mangle rawtext, since
200 199 # a read flag processor expects the text not being mangled at all.
201 200 metaoffset = 0
202 201 if metaoffset:
203 202 # remotefilelog fileblob stores copy metadata in its ancestortext,
204 203 # not its main blob. so we need to remove filelog metadata
205 204 # (containing copy information) from text.
206 205 blobtext = rawtext[metaoffset:]
207 206 else:
208 207 blobtext = rawtext
209 208 data = self._createfileblob(
210 209 blobtext, meta, flags, p1, p2, node, linknode
211 210 )
212 211 self.repo.contentstore.addremotefilelognode(self.filename, node, data)
213 212
214 213 return node
215 214
216 215 def renamed(self, node):
217 216 ancestors = self.repo.metadatastore.getancestors(self.filename, node)
218 217 p1, p2, linknode, copyfrom = ancestors[node]
219 218 if copyfrom:
220 219 return (copyfrom, p1)
221 220
222 221 return False
223 222
224 223 def size(self, node):
225 224 """return the size of a given revision"""
226 225 return len(self.read(node))
227 226
228 227 rawsize = size
229 228
230 229 def cmp(self, node, text):
231 230 """compare text with a given file revision
232 231
233 232 returns True if text is different than what is stored.
234 233 """
235 234
236 235 if node == self.repo.nullid:
237 236 return True
238 237
239 238 nodetext = self.read(node)
240 239 return nodetext != text
241 240
242 241 def __nonzero__(self):
243 242 return True
244 243
245 244 __bool__ = __nonzero__
246 245
247 246 def __len__(self):
248 247 if self.filename == b'.hgtags':
249 248 # The length of .hgtags is used to fast path tag checking.
250 249 # remotefilelog doesn't support .hgtags since the entire .hgtags
251 250 # history is needed. Use the excludepattern setting to make
252 251 # .hgtags a normal filelog.
253 252 return 0
254 253
255 254 raise RuntimeError(b"len not supported")
256 255
257 256 def heads(self):
258 257 # Fake heads of the filelog to satisfy hgweb.
259 258 return []
260 259
261 260 def empty(self):
262 261 return False
263 262
264 263 def flags(self, node):
265 264 if isinstance(node, int):
266 265 raise error.ProgrammingError(
267 266 b'remotefilelog does not accept integer rev for flags'
268 267 )
269 268 store = self.repo.contentstore
270 269 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
271 270
272 271 def parents(self, node):
273 272 if node == self.repo.nullid:
274 273 return self.repo.nullid, self.repo.nullid
275 274
276 275 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
277 276 p1, p2, linknode, copyfrom = ancestormap[node]
278 277 if copyfrom:
279 278 p1 = self.repo.nullid
280 279
281 280 return p1, p2
282 281
283 282 def parentrevs(self, rev):
284 283 # TODO(augie): this is a node and should be a rev, but for now
285 284 # nothing in core seems to actually break.
286 285 return self.parents(rev)
287 286
288 287 def linknode(self, node):
289 288 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
290 289 p1, p2, linknode, copyfrom = ancestormap[node]
291 290 return linknode
292 291
293 292 def linkrev(self, node):
294 293 return self.repo.unfiltered().changelog.rev(self.linknode(node))
295 294
296 295 def emitrevisions(
297 296 self,
298 297 nodes,
299 298 nodesorder=None,
300 299 revisiondata=False,
301 300 assumehaveparentrevisions=False,
302 301 deltaprevious=False,
303 302 deltamode=None,
304 303 sidedata_helpers=None,
305 304 ):
306 305 # we don't use any of these parameters here
307 306 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
308 307 del deltamode
309 308 prevnode = None
310 309 for node in nodes:
311 310 p1, p2 = self.parents(node)
312 311 if prevnode is None:
313 312 basenode = prevnode = p1
314 313 if basenode == node:
315 314 basenode = self.repo.nullid
316 315 if basenode != self.repo.nullid:
317 316 revision = None
318 317 delta = self.revdiff(basenode, node)
319 318 else:
320 319 revision = self.rawdata(node)
321 320 delta = None
322 321 yield revlog.revlogrevisiondelta(
323 322 node=node,
324 323 p1node=p1,
325 324 p2node=p2,
326 325 linknode=self.linknode(node),
327 326 basenode=basenode,
328 327 flags=self.flags(node),
329 328 baserevisionsize=None,
330 329 revision=revision,
331 330 delta=delta,
332 331 # Sidedata is not supported yet
333 332 sidedata=None,
334 333 # Protocol flags are not used yet
335 334 protocol_flags=0,
336 335 )
337 336
338 337 def revdiff(self, node1, node2):
339 338 return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))
340 339
341 340 def lookup(self, node):
342 341 if len(node) == 40:
343 342 node = bin(node)
344 343 if len(node) != 20:
345 344 raise error.LookupError(
346 345 node, self.filename, _(b'invalid lookup input')
347 346 )
348 347
349 348 return node
350 349
351 350 def rev(self, node):
352 351 # This is a hack to make TortoiseHG work.
353 352 return node
354 353
355 354 def node(self, rev):
356 355 # This is a hack.
357 356 if isinstance(rev, int):
358 357 raise error.ProgrammingError(
359 358 b'remotefilelog does not convert integer rev to node'
360 359 )
361 360 return rev
362 361
363 def _processflags(self, text, flags, operation, raw=False):
364 """deprecated entry point to access flag processors"""
365 msg = b'_processflag(...) use the specialized variant'
366 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
367 if raw:
368 return text, flagutil.processflagsraw(self, text, flags)
369 elif operation == b'read':
370 return flagutil.processflagsread(self, text, flags)
371 else: # write operation
372 return flagutil.processflagswrite(self, text, flags)
373
374 362 def revision(self, node, raw=False):
375 363 """returns the revlog contents at this node.
376 364 this includes the meta data traditionally included in file revlogs.
377 365 this is generally only used for bundling and communicating with vanilla
378 366 hg clients.
379 367 """
380 368 if node == self.repo.nullid:
381 369 return b""
382 370 if len(node) != 20:
383 371 raise error.LookupError(
384 372 node, self.filename, _(b'invalid revision input')
385 373 )
386 374 if (
387 375 node == self.repo.nodeconstants.wdirid
388 376 or node in self.repo.nodeconstants.wdirfilenodeids
389 377 ):
390 378 raise error.WdirUnsupported
391 379
392 380 store = self.repo.contentstore
393 381 rawtext = store.get(self.filename, node)
394 382 if raw:
395 383 return rawtext
396 384 flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
397 385 if flags == 0:
398 386 return rawtext
399 387 return flagutil.processflagsread(self, rawtext, flags)[0]
400 388
401 389 def rawdata(self, node):
402 390 return self.revision(node, raw=False)
403 391
404 392 def _read(self, id):
405 393 """reads the raw file blob from disk, cache, or server"""
406 394 fileservice = self.repo.fileservice
407 395 localcache = fileservice.localcache
408 396 cachekey = fileserverclient.getcachekey(
409 397 self.repo.name, self.filename, id
410 398 )
411 399 try:
412 400 return localcache.read(cachekey)
413 401 except KeyError:
414 402 pass
415 403
416 404 localkey = fileserverclient.getlocalkey(self.filename, id)
417 405 localpath = os.path.join(self.localpath, localkey)
418 406 try:
419 407 return shallowutil.readfile(localpath)
420 408 except IOError:
421 409 pass
422 410
423 411 fileservice.prefetch([(self.filename, id)])
424 412 try:
425 413 return localcache.read(cachekey)
426 414 except KeyError:
427 415 pass
428 416
429 417 raise error.LookupError(id, self.filename, _(b'no node'))
430 418
431 419 def ancestormap(self, node):
432 420 return self.repo.metadatastore.getancestors(self.filename, node)
433 421
434 422 def ancestor(self, a, b):
435 423 if a == self.repo.nullid or b == self.repo.nullid:
436 424 return self.repo.nullid
437 425
438 426 revmap, parentfunc = self._buildrevgraph(a, b)
439 427 nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
440 428
441 429 ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
442 430 if ancs:
443 431 # choose a consistent winner when there's a tie
444 432 return min(map(nodemap.__getitem__, ancs))
445 433 return self.repo.nullid
446 434
447 435 def commonancestorsheads(self, a, b):
448 436 """calculate all the heads of the common ancestors of nodes a and b"""
449 437
450 438 if a == self.repo.nullid or b == self.repo.nullid:
451 439 return self.repo.nullid
452 440
453 441 revmap, parentfunc = self._buildrevgraph(a, b)
454 442 nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
455 443
456 444 ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
457 445 return map(nodemap.__getitem__, ancs)
458 446
459 447 def _buildrevgraph(self, a, b):
460 448 """Builds a numeric revision graph for the given two nodes.
461 449 Returns a node->rev map and a rev->[revs] parent function.
462 450 """
463 451 amap = self.ancestormap(a)
464 452 bmap = self.ancestormap(b)
465 453
466 454 # Union the two maps
467 455 parentsmap = collections.defaultdict(list)
468 456 allparents = set()
469 457 for mapping in (amap, bmap):
470 458 for node, pdata in pycompat.iteritems(mapping):
471 459 parents = parentsmap[node]
472 460 p1, p2, linknode, copyfrom = pdata
473 461 # Don't follow renames (copyfrom).
474 462 # remotefilectx.ancestor does that.
475 463 if p1 != self.repo.nullid and not copyfrom:
476 464 parents.append(p1)
477 465 allparents.add(p1)
478 466 if p2 != self.repo.nullid:
479 467 parents.append(p2)
480 468 allparents.add(p2)
481 469
482 470 # Breadth first traversal to build linkrev graph
483 471 parentrevs = collections.defaultdict(list)
484 472 revmap = {}
485 473 queue = collections.deque(
486 474 ((None, n) for n in parentsmap if n not in allparents)
487 475 )
488 476 while queue:
489 477 prevrev, current = queue.pop()
490 478 if current in revmap:
491 479 if prevrev:
492 480 parentrevs[prevrev].append(revmap[current])
493 481 continue
494 482
495 483 # Assign linkrevs in reverse order, so start at
496 484 # len(parentsmap) and work backwards.
497 485 currentrev = len(parentsmap) - len(revmap) - 1
498 486 revmap[current] = currentrev
499 487
500 488 if prevrev:
501 489 parentrevs[prevrev].append(currentrev)
502 490
503 491 for parent in parentsmap.get(current):
504 492 queue.appendleft((currentrev, parent))
505 493
506 494 return revmap, parentrevs.__getitem__
507 495
508 496 def strip(self, minlink, transaction):
509 497 pass
510 498
511 499 # misc unused things
512 500 def files(self):
513 501 return []
514 502
515 503 def checksize(self):
516 504 return 0, 0
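A side note on the b'\x01\n' framing handled by read() near the top of this file: vanilla filelog revisions may begin with a copy-metadata block delimited by b'\x01\n' markers, and read() returns only the contents that follow it. A standalone sketch of that stripping step (illustrative only, not part of this module):

    def strip_filelog_meta(t):
        # revisions without a leading b'\1\n' marker carry no copy metadata
        if not t.startswith(b'\1\n'):
            return t
        # otherwise skip everything up to and including the closing b'\1\n'
        s = t.index(b'\1\n', 2)
        return t[s + 2:]

    assert strip_filelog_meta(b'\1\ncopy: a.txt\ncopyrev: 0000\n\1\ndata') == b'data'
    assert strip_filelog_meta(b'data') == b'data'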