##// END OF EJS Templates
manifest: add rawsize() proxy (API)...
Gregory Szorc -
r39894:9534fe1e default
parent child Browse files
Show More
@@ -1,2017 +1,2020 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import itertools
12 12 import struct
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from . import (
23 23 error,
24 24 mdiff,
25 25 policy,
26 26 pycompat,
27 27 repository,
28 28 revlog,
29 29 util,
30 30 )
31 31 from .utils import (
32 32 interfaceutil,
33 33 )
34 34
# Load the C parsers extension when available; pure-Python fallbacks for
# its classes are defined below and overridden near the bottom of this
# section when the extension provides them.
parsers = policy.importmod(r'parsers')
propertycache = util.propertycache
37 37
def _parse(data):
    """Generate (path, node, flags) tuples from flat manifest text.

    The precondition checking looks excessive on purpose: it mirrors the
    C lazymanifest parser exactly, so code developed against the pure
    Python version fails in the same situations as the C one.
    """
    if data and data[-1:] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for line in data.splitlines():
        if prev is not None and prev > line:
            raise ValueError('Manifest lines not in sorted order.')
        prev = line
        path, hashflags = line.split('\0')
        if len(hashflags) > 40:
            # anything after the 40 hex node characters is the flags field
            yield path, bin(hashflags[:40]), hashflags[40:]
        else:
            yield path, bin(hashflags), ''
56 56
def _text(it):
    """Render an iterable of (path, node, flags) tuples as manifest text."""
    files = []
    lines = []
    for path, node, flags in it:
        files.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hex(node), flags))

    _checkforbidden(files)
    return ''.join(lines)
68 68
class lazymanifestiter(object):
    """Iterator over the file names of a _lazymanifest."""

    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        self.pos += 1
        if pos == -1:
            # entry lives in extradata as a (name, node, flags) tuple
            return data[0]
        # entry lives in the manifest text; the name ends at the NUL
        zeropos = data.find('\x00', pos)
        return data[pos:zeropos]

    __next__ = next
90 90
class lazymanifestiterentries(object):
    """Iterator over the (name, node, flags) entries of a _lazymanifest."""

    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            # already a (name, node, flags) tuple from extradata
            self.pos += 1
            return data
        # parse the entry out of the manifest text
        zeropos = data.find('\x00', pos)
        nodeval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], nodeval, flags)

    __next__ = next
115 115
def unhexlify(data, extra, pos, length):
    """Decode the hex node at data[pos:pos + length].

    If *extra* is non-zero, its low byte is appended: it carries the node
    byte that does not fit into the 40-character hex representation.
    """
    node = bin(data[pos:pos + length])
    if extra:
        node += chr(extra & 0xff)
    return node
121 121
122 122 def _cmp(a, b):
123 123 return (a > b) - (a < b)
124 124
class _lazymanifest(object):
    """Pure Python fallback for the C ``parsers.lazymanifest``.

    The manifest text is kept verbatim in ``self.data``.  For each entry,
    ``self.positions`` stores either a byte offset into ``data`` (>= 0) or
    a negative value ``-(i + 1)`` pointing at ``self.extradata[i]``, a
    ``(name, node, flags)`` tuple staged by ``__setitem__`` and folded
    back into ``data`` by ``_compact()``.  ``self.extrainfo`` carries the
    optional extra node byte that cannot be represented in the 40
    hex characters of a manifest line.
    """

    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            # fresh parse from manifest text
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            # internal copy-constructor path, see copy()
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        """Return the start offset of every line, validating that the text
        is newline-terminated and sorted by filename (matching the C
        implementation's checks)."""
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1:] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        # positive number is an index in 'data'
        # negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        # filename of the entry at encoded position 'pos'
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        """Binary search; return the entry index of 'key' or -1."""
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        # flags live between the 40 hex node characters following the NUL
        # (at pos) and the end of the line; 'needle' is accepted for
        # symmetry with the C API but unused here
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        return self.data[start:end]

    def __getitem__(self, key):
        """Return (node, flags) for 'key'; raise KeyError if absent."""
        if not isinstance(key, bytes):
            raise TypeError("getitem: manifest keys must be a bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            # entry still lives in extradata as (name, node, flags)
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            # poison the in-data entry; _compact() drops the bytes later
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        """Insert or replace 'key' with 'value', a (node, flags) tuple.

        The change is staged in extradata; self.data is only rewritten by
        _compact().
        """
        if not isinstance(key, bytes):
            raise TypeError("setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            # BUGFIX: the message used to be passed as a second argument to
            # TypeError ("...got %r", flags) so %r was never formatted;
            # also fixed the "must a" typo
            raise TypeError("flags must be a 0 or 1 byte string, got %r"
                            % flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        """Fold staged extradata entries back into self.data.

        Afterwards every entry in self.positions is a non-negative offset
        into self.data and self.extradata is empty.
        """
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        last_cut = 0
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                # consume a run of consecutive in-data entries and copy
                # their bytes over in one slice
                cur = self.positions[i]
                last_cut = cur
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                # serialize a run of staged extradata entries
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    if len(t[1]) > 20:
                        # NOTE(review): __setitem__ trims nodes to at most
                        # 21 bytes, so t[1][21] looks out of range here --
                        # verify against the C implementation
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        # (name, node, flags) -> one manifest text line
        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c
366 366
try:
    # Prefer the C implementation when the parsers extension provides it;
    # the pure Python class above remains as the fallback.
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
371 371
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    """A flat manifest mapping file path -> (node, flags).

    Thin wrapper around _lazymanifest (the C implementation when
    available) providing the imanifestdict interface.
    """

    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # node only; use find() to also get the flags
        return self._lm[key][0]

    def find(self, key):
        # (node, flags) for the given path; raises KeyError if absent
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        # setting a node keeps any flags already recorded for the path
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match:
            # narrow both manifests to the matcher before comparing
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)
        diff = self.diff(m2)
        # hashflags[1][0] is the node on the m2 side; None means the file
        # is absent there
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        # cached util.dirs structure over this manifest's file paths
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything left in fset was asked for but does not exist
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            # copy just the named entries straight out of the lazymanifest
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            # narrow both manifests to the matcher before diffing
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        # replace the flags while keeping the node; raises KeyError if the
        # path is absent
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        # (path, node) pairs; flags are dropped
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        # (path, node, flags) triples
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                                _("failed to remove %s from manifest") % f)
                    l = ""
                # merge hunks that overlap or touch the pending one
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext
601 601
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        # move i forward to the next occurrence of byte c (or the end of m)
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # bisect over *lines*: each probe offset is first rewound to the start
    # of its line before the filename comparison
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    # lo is now the start of the line where s lives, or the insertion point
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
641 641
def _checkforbidden(l):
    """Raise StorageError if any filename would corrupt the manifest."""
    for f in l:
        # '\n' terminates manifest lines and '\r' breaks consumers
        if '\n' in f or '\r' in f:
            raise error.StorageError(
                _("'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f))
649 649
650 650
651 651 # apply the changes collected during the bisect loop to our addlist
652 652 # return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply the delta chunks *x* (triples of start, end, content) to
    *addlist* and return (deltatext, newaddlist): a binary delta suitable
    for revlog.addrevision plus the patched text."""
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    cursor = 0
    patched = bytearray()

    for start, end, content in x:
        patched += addlist[cursor:start]
        if content:
            patched += bytearray(content)

        cursor = end

    patched += addlist[cursor:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, patched
671 671
672 672 def _splittopdir(f):
673 673 if '/' in f:
674 674 dir, subpath = f.split('/', 1)
675 675 return dir + '/', subpath
676 676 else:
677 677 return '', f
678 678
679 679 _noop = lambda s: None
680 680
681 681 class treemanifest(object):
682 682 def __init__(self, dir='', text=''):
683 683 self._dir = dir
684 684 self._node = nullid
685 685 self._loadfunc = _noop
686 686 self._copyfunc = _noop
687 687 self._dirty = False
688 688 self._dirs = {}
689 689 self._lazydirs = {}
690 690 # Using _lazymanifest here is a little slower than plain old dicts
691 691 self._files = {}
692 692 self._flags = {}
693 693 if text:
694 694 def readsubtree(subdir, subm):
695 695 raise AssertionError('treemanifest constructor only accepts '
696 696 'flat manifests')
697 697 self.parse(text, readsubtree)
698 698 self._dirty = True # Mark flat manifest dirty after parsing
699 699
700 700 def _subpath(self, path):
701 701 return self._dir + path
702 702
703 703 def _loadalllazy(self):
704 704 for k, (path, node, readsubtree) in self._lazydirs.iteritems():
705 705 self._dirs[k] = readsubtree(path, node)
706 706 self._lazydirs = {}
707 707
708 708 def _loadlazy(self, d):
709 709 path, node, readsubtree = self._lazydirs[d]
710 710 self._dirs[d] = readsubtree(path, node)
711 711 del self._lazydirs[d]
712 712
713 713 def _loadchildrensetlazy(self, visit):
714 714 if not visit:
715 715 return None
716 716 if visit == 'all' or visit == 'this':
717 717 self._loadalllazy()
718 718 return None
719 719
720 720 todel = []
721 721 for k in visit:
722 722 kslash = k + '/'
723 723 ld = self._lazydirs.get(kslash)
724 724 if ld:
725 725 path, node, readsubtree = ld
726 726 self._dirs[kslash] = readsubtree(path, node)
727 727 todel.append(kslash)
728 728 for kslash in todel:
729 729 del self._lazydirs[kslash]
730 730 return visit
731 731
732 732 def __len__(self):
733 733 self._load()
734 734 size = len(self._files)
735 735 self._loadalllazy()
736 736 for m in self._dirs.values():
737 737 size += m.__len__()
738 738 return size
739 739
740 740 def __nonzero__(self):
741 741 # Faster than "__len() != 0" since it avoids loading sub-manifests
742 742 return not self._isempty()
743 743
744 744 __bool__ = __nonzero__
745 745
    def _isempty(self):
        self._load() # for consistency; already loaded by all callers
        # See if we can skip loading everything: any file, or any
        # already-loaded non-empty subdir, proves non-emptiness without
        # touching the lazy subdirs.
        if self._files or (self._dirs and
                           any(not m._isempty() for m in self._dirs.values())):
            return False
        # Otherwise we must realize all lazy subdirs before concluding.
        self._loadalllazy()
        return (not self._dirs or
                all(m._isempty() for m in self._dirs.values()))
755 755
756 756 def __repr__(self):
757 757 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
758 758 (self._dir, hex(self._node),
759 759 bool(self._loadfunc is _noop),
760 760 self._dirty, id(self)))
761 761
762 762 def dir(self):
763 763 '''The directory that this tree manifest represents, including a
764 764 trailing '/'. Empty string for the repo root directory.'''
765 765 return self._dir
766 766
767 767 def node(self):
768 768 '''This node of this instance. nullid for unsaved instances. Should
769 769 be updated when the instance is read or written from a revlog.
770 770 '''
771 771 assert not self._dirty
772 772 return self._node
773 773
774 774 def setnode(self, node):
775 775 self._node = node
776 776 self._dirty = False
777 777
778 778 def iterentries(self):
779 779 self._load()
780 780 self._loadalllazy()
781 781 for p, n in sorted(itertools.chain(self._dirs.items(),
782 782 self._files.items())):
783 783 if p in self._files:
784 784 yield self._subpath(p), n, self._flags.get(p, '')
785 785 else:
786 786 for x in n.iterentries():
787 787 yield x
788 788
789 789 def items(self):
790 790 self._load()
791 791 self._loadalllazy()
792 792 for p, n in sorted(itertools.chain(self._dirs.items(),
793 793 self._files.items())):
794 794 if p in self._files:
795 795 yield self._subpath(p), n
796 796 else:
797 797 for f, sn in n.iteritems():
798 798 yield f, sn
799 799
800 800 iteritems = items
801 801
802 802 def iterkeys(self):
803 803 self._load()
804 804 self._loadalllazy()
805 805 for p in sorted(itertools.chain(self._dirs, self._files)):
806 806 if p in self._files:
807 807 yield self._subpath(p)
808 808 else:
809 809 for f in self._dirs[p]:
810 810 yield f
811 811
812 812 def keys(self):
813 813 return list(self.iterkeys())
814 814
815 815 def __iter__(self):
816 816 return self.iterkeys()
817 817
818 818 def __contains__(self, f):
819 819 if f is None:
820 820 return False
821 821 self._load()
822 822 dir, subpath = _splittopdir(f)
823 823 if dir:
824 824 if dir in self._lazydirs:
825 825 self._loadlazy(dir)
826 826
827 827 if dir not in self._dirs:
828 828 return False
829 829
830 830 return self._dirs[dir].__contains__(subpath)
831 831 else:
832 832 return f in self._files
833 833
834 834 def get(self, f, default=None):
835 835 self._load()
836 836 dir, subpath = _splittopdir(f)
837 837 if dir:
838 838 if dir in self._lazydirs:
839 839 self._loadlazy(dir)
840 840
841 841 if dir not in self._dirs:
842 842 return default
843 843 return self._dirs[dir].get(subpath, default)
844 844 else:
845 845 return self._files.get(f, default)
846 846
847 847 def __getitem__(self, f):
848 848 self._load()
849 849 dir, subpath = _splittopdir(f)
850 850 if dir:
851 851 if dir in self._lazydirs:
852 852 self._loadlazy(dir)
853 853
854 854 return self._dirs[dir].__getitem__(subpath)
855 855 else:
856 856 return self._files[f]
857 857
858 858 def flags(self, f):
859 859 self._load()
860 860 dir, subpath = _splittopdir(f)
861 861 if dir:
862 862 if dir in self._lazydirs:
863 863 self._loadlazy(dir)
864 864
865 865 if dir not in self._dirs:
866 866 return ''
867 867 return self._dirs[dir].flags(subpath)
868 868 else:
869 869 if f in self._lazydirs or f in self._dirs:
870 870 return ''
871 871 return self._flags.get(f, '')
872 872
873 873 def find(self, f):
874 874 self._load()
875 875 dir, subpath = _splittopdir(f)
876 876 if dir:
877 877 if dir in self._lazydirs:
878 878 self._loadlazy(dir)
879 879
880 880 return self._dirs[dir].find(subpath)
881 881 else:
882 882 return self._files[f], self._flags.get(f, '')
883 883
884 884 def __delitem__(self, f):
885 885 self._load()
886 886 dir, subpath = _splittopdir(f)
887 887 if dir:
888 888 if dir in self._lazydirs:
889 889 self._loadlazy(dir)
890 890
891 891 self._dirs[dir].__delitem__(subpath)
892 892 # If the directory is now empty, remove it
893 893 if self._dirs[dir]._isempty():
894 894 del self._dirs[dir]
895 895 else:
896 896 del self._files[f]
897 897 if f in self._flags:
898 898 del self._flags[f]
899 899 self._dirty = True
900 900
901 901 def __setitem__(self, f, n):
902 902 assert n is not None
903 903 self._load()
904 904 dir, subpath = _splittopdir(f)
905 905 if dir:
906 906 if dir in self._lazydirs:
907 907 self._loadlazy(dir)
908 908 if dir not in self._dirs:
909 909 self._dirs[dir] = treemanifest(self._subpath(dir))
910 910 self._dirs[dir].__setitem__(subpath, n)
911 911 else:
912 912 self._files[f] = n[:21] # to match manifestdict's behavior
913 913 self._dirty = True
914 914
    def _load(self):
        # One-shot population hooks: swap the hook out for _noop *before*
        # invoking it, so a reentrant call from inside the hook does not
        # run it a second time.
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
922 922
923 923 def setflag(self, f, flags):
924 924 """Set the flags (symlink, executable) for path f."""
925 925 self._load()
926 926 dir, subpath = _splittopdir(f)
927 927 if dir:
928 928 if dir in self._lazydirs:
929 929 self._loadlazy(dir)
930 930 if dir not in self._dirs:
931 931 self._dirs[dir] = treemanifest(self._subpath(dir))
932 932 self._dirs[dir].setflag(subpath, flags)
933 933 else:
934 934 self._flags[f] = flags
935 935 self._dirty = True
936 936
    def copy(self):
        """Return a copy of this tree.

        The children are copied lazily when possible: if this instance is
        itself not loaded yet, the new instance gets a _copyfunc that
        performs the deep copy on first access instead of copying now.
        """
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                # OPT: it'd be nice to not load everything here. Unfortunately
                # this makes a mess of the "dirty" state tracking if we don't.
                self._loadalllazy()
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: copy the children eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer the copy until first use
                copy._copyfunc = _copyfunc
        else:
            # this instance is itself an unrealized copy; chain the hook
            copy._copyfunc = self._copyfunc
        return copy
959 959
    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            # narrow both manifests to the matcher before comparing
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()
        def _filesnotin(t1, t2):
            # identical, unmodified subtrees cannot differ; skip them
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            t1._loadalllazy()
            t2._loadalllazy()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is missing from t2
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
988 988
    @propertycache
    def _alldirs(self):
        # cached util.dirs structure over this tree's file paths
        return util.dirs(self)
992 992
993 993 def dirs(self):
994 994 return self._alldirs
995 995
996 996 def hasdir(self, dir):
997 997 self._load()
998 998 topdir, subdir = _splittopdir(dir)
999 999 if topdir:
1000 1000 if topdir in self._lazydirs:
1001 1001 self._loadlazy(topdir)
1002 1002 if topdir in self._dirs:
1003 1003 return self._dirs[topdir].hasdir(subdir)
1004 1004 return False
1005 1005 dirslash = dir + '/'
1006 1006 return dirslash in self._dirs or dirslash in self._lazydirs
1007 1007
1008 1008 def walk(self, match):
1009 1009 '''Generates matching file names.
1010 1010
1011 1011 Equivalent to manifest.matches(match).iterkeys(), but without creating
1012 1012 an entirely new manifest.
1013 1013
1014 1014 It also reports nonexistent files by marking them bad with match.bad().
1015 1015 '''
1016 1016 if match.always():
1017 1017 for f in iter(self):
1018 1018 yield f
1019 1019 return
1020 1020
1021 1021 fset = set(match.files())
1022 1022
1023 1023 for fn in self._walk(match):
1024 1024 if fn in fset:
1025 1025 # specified pattern is the exact name
1026 1026 fset.remove(fn)
1027 1027 yield fn
1028 1028
1029 1029 # for dirstate.walk, files=['.'] means "walk the whole tree".
1030 1030 # follow that here, too
1031 1031 fset.discard('.')
1032 1032
1033 1033 for fn in sorted(fset):
1034 1034 if not self.hasdir(fn):
1035 1035 match.bad(fn, None)
1036 1036
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        # Ask the matcher which children could possibly match; an empty
        # result means this whole subtree can be skipped.
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                # p is a directory key with a trailing '/'; strip it
                # before consulting the visit set.
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f
1055 1055
1056 1056 def matches(self, match):
1057 1057 '''generate a new manifest filtered by the match argument'''
1058 1058 if match.always():
1059 1059 return self.copy()
1060 1060
1061 1061 return self._matches(match)
1062 1062
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'all':
            # Everything below this directory matches.
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            # Nothing below this directory can match.
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            # Directory keys carry a trailing '/'; the visit set does not.
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1103 1103
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            # Restrict both sides to the matched entries, then diff those.
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # Same node and no pending edits on either side: subtrees are
            # identical, so the whole pair can be skipped.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # OPT: do we need to load everything?
            t1._loadalllazy()
            t2._loadalllazy()
            # Recurse into every directory present on either side; a
            # missing side is represented by the shared empty tree.
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # Files only present in t2.
            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
1157 1157
1158 1158 def unmodifiedsince(self, m2):
1159 1159 return not self._dirty and not m2._dirty and self._node == m2._node
1160 1160
    def parse(self, text, readsubtree):
        # Populate this treemanifest from serialized manifest `text`.
        # Subdirectory entries (flag 't') are recorded lazily; readsubtree
        # is called later to materialize them on demand.
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                selflazy[f] = (subpath(f), n, readsubtree)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1182 1182
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        # Forces a full (recursive) load before serializing.
        self._load()
        return _text(self.iterentries())
1187 1187
    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        # Unloaded (lazy) subtrees already know their node; loaded ones use
        # the node recorded on the submanifest.  Both serialize with flag 't'.
        lazydirs = [(d[:-1], node, 't') for
                    d, (path, node, readsubtree) in self._lazydirs.iteritems()]
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))
1199 1199
    def read(self, gettext, readsubtree):
        # Defer parsing: install a load function that fetches and parses
        # the text only when the manifest is first accessed.
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            # Freshly read content matches storage, so it is not dirty.
            s._dirty = False
        self._loadfunc = _load_for_read
1205 1205
    def writesubtrees(self, m1, m2, writesubtree, match):
        # Invoke writesubtree() for every subdirectory of this manifest,
        # passing along the corresponding parent nodes from m1/m2.
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        def getnode(m, d):
            # A lazy entry already carries its node; otherwise look the
            # submanifest up (missing -> empty tree's node).
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node
        # we should have always loaded everything by the time we get here for
        # `self`, but possibly not in `m1` or `m2`.
        assert not self._lazydirs
        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'this' or visit == 'all':
            visit = None
        for d, subm in self._dirs.iteritems():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                # Prefer a real node as the first parent.
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)
1232 1232
    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
            # Matcher rules out this whole subtree.
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in self._dirs.iteritems():
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree
1250 1250
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """
    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # _dirty: in-memory state diverges from the on-disk file.
        self._dirty = False
        # _read: on-disk contents have been loaded into memory.
        self._read = False
        # _opener: vfs used to access the cache file; None disables
        # persistence entirely.
        self._opener = None

    def read(self):
        # Load the on-disk cache once; no-op without an opener.
        if self._read or self._opener is None:
            return

        try:
            with self._opener('manifestfulltextcache') as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    # Entries are stored oldest-to-newest, so inserting in
                    # file order reconstructs the LRU ordering.
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        # Persist the cache; only when dirty and persistence is enabled.
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
                'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
            ) as fp:
            # Walk the LRU ring from oldest to newest so read() restores
            # the same ordering.
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        # Drop the in-memory entries; optionally truncate the on-disk file
        # too by writing out the (now empty) cache.
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1359 1359
@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flatmanifeset and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        # The opener option takes precedence over the constructor argument
        # (see docstring above).
        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                # Per-directory revlogs live under meta/<tree>/.
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True)

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_lockref'):
            return

        self._fulltextcache._opener = repo.cachevfs
        # Weak references so the hook does not keep the repo or this
        # revlog alive past their normal lifetime.
        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        if repo._currentlock(repo._lockref) is not None:
            repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        # Read-only accessor for the full-text LRU cache.
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        """Return the manifestrevlog for subdirectory `d` (cached)."""
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        # Store manifest `m` as a new revision and return its node.
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            # Recursively persist each subdirectory before this directory,
            # so the nodeids serialized by dirtext() below are final.
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    # The methods below mostly proxy straight through to the underlying
    # revlog, satisfying the imanifeststorage interface.

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value
1602 1605
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo, rootstore):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = repo.narrowmatch()

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            # Directories outside the narrow spec get a stand-in context.
            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % tree)
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            # Cache the context (nullid is cheap to recreate and never
            # cached).
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)
1691 1694
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    # An in-memory, writable (not yet committed) flat manifest context.
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        # Root manifest storage backs flat manifests.
        return self._manifestlog.getstorage(b'')

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        # Persist the in-memory manifest; returns the new node.
        return self._storage().add(self._manifestdict, transaction, link,
                                   p1, p2, added, removed, match=match)
1715 1718
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        # Parsed manifestdict; populated lazily by read().
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        # Returns a writable in-memory copy of this revision's contents.
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    # Populate the full-text cache for later readers.
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)
1795 1798
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    # An in-memory, writable (not yet committed) tree manifest context.
    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        # readtree lets the storage layer load subdirectory manifests when
        # writing the tree.
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()
        return self._storage().add(self._treemanifest, transaction, link,
                                   p1, p2, added, removed, readtree=readtree,
                                   match=match)
1823 1826
1824 1827 @interfaceutil.implementer(repository.imanifestrevisionstored)
1825 1828 class treemanifestctx(object):
1826 1829 def __init__(self, manifestlog, dir, node):
1827 1830 self._manifestlog = manifestlog
1828 1831 self._dir = dir
1829 1832 self._data = None
1830 1833
1831 1834 self._node = node
1832 1835
1833 1836 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1834 1837 # we can instantiate treemanifestctx objects for directories we don't
1835 1838 # have on disk.
1836 1839 #self.p1, self.p2 = store.parents(node)
1837 1840 #rev = store.rev(node)
1838 1841 #self.linkrev = store.linkrev(rev)
1839 1842
1840 1843 def _storage(self):
1841 1844 narrowmatch = self._manifestlog._narrowmatch
1842 1845 if not narrowmatch.always():
1843 1846 if not narrowmatch.visitdir(self._dir[:-1] or '.'):
1844 1847 return excludedmanifestrevlog(self._dir)
1845 1848 return self._manifestlog.getstorage(self._dir)
1846 1849
1847 1850 def read(self):
1848 1851 if self._data is None:
1849 1852 store = self._storage()
1850 1853 if self._node == nullid:
1851 1854 self._data = treemanifest()
1852 1855 # TODO accessing non-public API
1853 1856 elif store._treeondisk:
1854 1857 m = treemanifest(dir=self._dir)
1855 1858 def gettext():
1856 1859 return store.revision(self._node)
1857 1860 def readsubtree(dir, subm):
1858 1861 # Set verify to False since we need to be able to create
1859 1862 # subtrees for trees that don't exist on disk.
1860 1863 return self._manifestlog.get(dir, subm, verify=False).read()
1861 1864 m.read(gettext, readsubtree)
1862 1865 m.setnode(self._node)
1863 1866 self._data = m
1864 1867 else:
1865 1868 if self._node in store.fulltextcache:
1866 1869 text = pycompat.bytestr(store.fulltextcache[self._node])
1867 1870 else:
1868 1871 text = store.revision(self._node)
1869 1872 arraytext = bytearray(text)
1870 1873 store.fulltextcache[self._node] = arraytext
1871 1874 self._data = treemanifest(dir=self._dir, text=text)
1872 1875
1873 1876 return self._data
1874 1877
1875 1878 def node(self):
1876 1879 return self._node
1877 1880
1878 1881 def new(self, dir=''):
1879 1882 return memtreemanifestctx(self._manifestlog, dir=dir)
1880 1883
1881 1884 def copy(self):
1882 1885 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1883 1886 memmf._treemanifest = self.read().copy()
1884 1887 return memmf
1885 1888
1886 1889 @propertycache
1887 1890 def parents(self):
1888 1891 return self._storage().parents(self._node)
1889 1892
1890 1893 def readdelta(self, shallow=False):
1891 1894 '''Returns a manifest containing just the entries that are present
1892 1895 in this manifest, but not in its p1 manifest. This is efficient to read
1893 1896 if the revlog delta is already p1.
1894 1897
1895 1898 If `shallow` is True, this will read the delta for this directory,
1896 1899 without recursively reading subdirectory manifests. Instead, any
1897 1900 subdirectory entry will be reported as it appears in the manifest, i.e.
1898 1901 the subdirectory will be reported among files and distinguished only by
1899 1902 its 't' flag.
1900 1903 '''
1901 1904 store = self._storage()
1902 1905 if shallow:
1903 1906 r = store.rev(self._node)
1904 1907 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1905 1908 return manifestdict(d)
1906 1909 else:
1907 1910 # Need to perform a slow delta
1908 1911 r0 = store.deltaparent(store.rev(self._node))
1909 1912 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1910 1913 m1 = self.read()
1911 1914 md = treemanifest(dir=self._dir)
1912 1915 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1913 1916 if n1:
1914 1917 md[f] = n1
1915 1918 if fl1:
1916 1919 md.setflag(f, fl1)
1917 1920 return md
1918 1921
1919 1922 def readfast(self, shallow=False):
1920 1923 '''Calls either readdelta or read, based on which would be less work.
1921 1924 readdelta is called if the delta is against the p1, and therefore can be
1922 1925 read quickly.
1923 1926
1924 1927 If `shallow` is True, it only returns the entries from this manifest,
1925 1928 and not any submanifests.
1926 1929 '''
1927 1930 store = self._storage()
1928 1931 r = store.rev(self._node)
1929 1932 deltaparent = store.deltaparent(r)
1930 1933 if (deltaparent != nullrev and
1931 1934 deltaparent in store.parentrevs(r)):
1932 1935 return self.readdelta(shallow=shallow)
1933 1936
1934 1937 if shallow:
1935 1938 return manifestdict(store.revision(self._node))
1936 1939 else:
1937 1940 return self.read()
1938 1941
    def find(self, key):
        # Delegate lookup of ``key`` to the fully loaded treemanifest.
        return self.read().find(key)
1941 1944
class excludeddir(treemanifest):
    """Placeholder manifest for a directory excluded from the repository.

    When narrowing is active on a repository using treemanifests, some of
    the directory revlogs are excluded from the resulting clone. That is a
    huge storage win for clients, but internals still need some sort of
    pseudo-manifest for such a directory -- its node is known even though
    its contents are not -- e.g. so a merge conflict outside the narrowspec
    can be detected. This class is that stand-in.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Register a single empty file so iterators and the like surface the
        # directory itself (i.e. something like "dir/").
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so skip
    # copying entirely. Besides being a noticeable speedup when very many
    # directories lie outside the narrowspec, this also keeps the copy the
    # same type as the original, which the super type's copy() would not.
    def copy(self):
        return self
1968 1971
class excludeddirmanifestctx(treemanifestctx):
    """Context wrapper around excludeddir -- see that docstring for rationale."""

    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    # Writing a manifest from outside the narrowspec is always a bug.
    def write(self, *args):
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)
1981 1984
class excludedmanifestrevlog(manifestrevlog):
    """Placeholder for excluded treemanifest revlogs.

    With narrowing active on a treemanifest repository, there will be
    references to directories that cannot be seen because their revlogs
    were skipped. This class conforms to the manifestrevlog interface for
    those directories and proactively rejects reads of, and writes to,
    storage outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            'attempt to get length of excluded dir %s' % self._dir)

    def rev(self, node):
        raise error.ProgrammingError(
            'attempt to get rev from excluded dir %s' % self._dir)

    def linkrev(self, node):
        raise error.ProgrammingError(
            'attempt to get linkrev from excluded dir %s' % self._dir)

    def node(self, rev):
        raise error.ProgrammingError(
            'attempt to get node from excluded dir %s' % self._dir)

    def add(self, *args, **kwargs):
        # Entries in dirlogs outside the narrow clone should never be
        # written. The method still gets called from writesubtree() in
        # _addtree(), though, so it has to exist and no-op. We should
        # possibly make that avoid calling add() with a clean manifest
        # (_dirty is always False in excludeddir instances).
        pass
@@ -1,1634 +1,1642 b''
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
30 30
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    # NOTE: methods on Interface subclasses are declarations, not
    # implementations, and (zope.interface-style) deliberately omit ``self``.
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
76 76
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
96 96
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating of the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
185 185
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        # TODO: undocumented declaration; presumably mirrors the legacy
        # ``changegroupsubset`` wire command -- confirm and document.
        pass
217 217
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
274 274
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
299 299
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
305 305
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Return support for capability ``name``.

        ``True`` for a boolean capability, the value string for a
        ``name=value`` capability, ``False`` when unsupported.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        prefix = '%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        """Raise ``error.CapabilityError`` unless ``name`` is supported."""
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
329 329
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")
343 343
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
393 393
class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")
424 424
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        # NOTE(review): unlike the other declarations on this interface,
        # ``self`` is spelled explicitly here.
        """Add an item to the index at specific revision."""
475 475
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""
569 569
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the ``irevisiondeltarequest``
        interface, emits objects conforming to the ``irevisiondelta``
        interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform to the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a delta
        against a non-ancestor revision. Another example of a bad delta is a
        delta against a censored revision.
        """
659 659
660 660 class ifilemutation(interfaceutil.Interface):
661 661 """Storage interface for mutation events of a tracked file."""
662 662
663 663 def add(filedata, meta, transaction, linkrev, p1, p2):
664 664 """Add a new revision to the store.
665 665
666 666 Takes file data, dictionary of metadata, a transaction, linkrev,
667 667 and parent nodes.
668 668
669 669 Returns the node that was added.
670 670
671 671 May no-op if a revision matching the supplied data is already stored.
672 672 """
673 673
674 674 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
675 675 flags=0, cachedelta=None):
676 676 """Add a new revision to the store.
677 677
678 678 This is similar to ``add()`` except it operates at a lower level.
679 679
680 680 The data passed in already contains a metadata header, if any.
681 681
682 682 ``node`` and ``flags`` can be used to define the expected node and
683 683 the flags to use with storage.
684 684
685 685 ``add()`` is usually called when adding files from e.g. the working
686 686 directory. ``addrevision()`` is often called by ``add()`` and for
687 687 scenarios where revision data has already been computed, such as when
688 688 applying raw data from a peer repo.
689 689 """
690 690
691 691 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
692 692 """Process a series of deltas for storage.
693 693
694 694 ``deltas`` is an iterable of 7-tuples of
695 695 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
696 696 to add.
697 697
698 698 The ``delta`` field contains ``mpatch`` data to apply to a base
699 699 revision, identified by ``deltabase``. The base node can be
700 700 ``nullid``, in which case the header from the delta can be ignored
701 701 and the delta used as the fulltext.
702 702
703 703 ``addrevisioncb`` should be called for each node as it is committed.
704 704
705 705 Returns a list of nodes that were processed. A node will be in the list
706 706 even if it existed in the store previously.
707 707 """
708 708
    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """
725 725
    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """
735 735
    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
745 745
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    _generaldelta = interfaceutil.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """
773 773
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """
948 948
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """
977 977
class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
1008 1008
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """
1026 1026
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.

        TODO this is only used by debug* commands and can probably be deleted
        easily.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """
1202 1210
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """
1255 1263
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """
1268 1276
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        # NOTE(review): undocumented in this interface; presumably returns
        # the revision-branch cache instance -- confirm against localrepo.
        pass

    def branchtip(branchtip, ignoremissing=False):
        # NOTE(review): the parameter shadows the method name; it is
        # presumably the branch name -- confirm against localrepo.
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        # NOTE(review): undocumented; presumably whether this repo can be
        # copied (e.g. for hardlink clones) -- confirm against localrepo.
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass
1631 1639
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""
General Comments 0
You need to be logged in to leave comments. Login now