##// END OF EJS Templates
dirstate-entry: use `?` for the state of entry without any tracking...
marmoute -
r48900:418611f1 default
parent child Browse files
Show More
@@ -1,824 +1,826 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
# in-memory bytes buffer (Python 2/3 compatible alias)
stringio = pycompat.bytesio


# local aliases so the hot (un)packing loops below avoid attribute lookups
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # tracking status in the working copy and in each of the two parents
    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three item above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge case. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    # cached stat data for the file; all three are None when unknown
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # a file cannot be "merged" and identical to a parent at the same time
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            # parentfiledata is a (mode, size, mtime) triple
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # the sentinel sizes are reserved for special v1 encodings
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                # removed after a merge
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                # removed, but came from the second parent
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                # 42 is a dummy mtime, immediately cleared just below
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean" cancelling potential "possibly dirty call"

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be call on "normal" entry only. There are not
        reason for this to not change in the future as long as the ccode is
        updated to preserve the proper state of the non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._p2_tracked = False  # this might be wrong
        self._merged = False
        self._clean_p2 = False
        self._possibly_dirty = False
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by command like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` is replacing various `normallookup` call. So we set
        # "possibly dirty" to stay on the safe side.
        #
        # Consider dropping this in the future in favor of something less broad.
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" from a DirstateItem

        This is to be call by the dirstatemap code when the second parent is dropped
        """
        if not (self.merged or self.from_p2):
            # nothing merge-related to drop
            return
        self._p1_tracked = self.merged  # why is this not already properly set ?

        self._merged = False
        self._clean_p1 = False
        self._clean_p2 = False
        self._p2_tracked = False
        self._possibly_dirty = True
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        # not tracked anywhere: report the v1 "untracked" marker instead of
        # raising like v1_state() does
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            return b'?'
        return self.v1_state()

    @property
    def tracked(self):
        """True is the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True is the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        if not self._wc_tracked:
            return False
        elif self.added:
            return False
        elif self._merged:
            return False
        elif self._clean_p2:
            return False
        return True

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True is the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        if not self._wc_tracked:
            return False
        return self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self.merged:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed and self._merged:
            return NONNORMAL
        elif self.removed and self._clean_p2:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.merged:
            return FROM_P2
        elif self.added:
            return NONNORMAL
        elif self.from_p2:
            return FROM_P2
        elif self._possibly_dirty:
            # without a cached size, fall back to the "nonnormal" sentinel
            return self._size if self._size is not None else NONNORMAL
        else:
            return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self.merged:
            return AMBIGUOUS_TIME
        elif self.added:
            return AMBIGUOUS_TIME
        elif self.from_p2:
            return AMBIGUOUS_TIME
        else:
            return self._mtime if self._mtime is not None else 0

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
428 430
429 431
def gettype(q):
    """Return the revision type encoded in the low 16 bits of *q*."""
    low16_mask = 0xFFFF
    return int(q & low16_mask)
432 434
433 435
class BaseIndexObject(object):
    """Pure Python base implementation of a revlog index object."""

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size in bytes of one serialized index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        """deprecated accessor kept for backward compatibility"""
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        """lazily built {node: rev} mapping covering the whole index"""
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            # entry[7] is the node id
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        """drop cached nodemap data for all revisions >= *start*"""
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        """forget lazily-computed data (currently only the nodemap cache)"""
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # entries parsed from the initial data plus in-memory appended ones
        return self._lgt + len(self._extra)

    def append(self, tup):
        """append a new (unpacked) index entry, keeping the nodemap in sync"""
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        """serialize one entry; v1 has no sidedata so fields 8/9 must be 0"""
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        """validate that *i* is a usable revision number"""
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry appended in memory after the initial parsing
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first on-disk entry's offset field doubles as the index
            # header; keep only the type bits of the stored value
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # v1 entries carry no sidedata: extend the tuple with zero sidedata
        # offset/length and inline compression modes to match the v2 layout
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0 shares its leading bytes with the index header on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
559 561
560 562
class IndexObject(BaseIndexObject):
    """Index object backed by a single, non-inline blob of raw index data."""

    def __init__(self, data):
        size = self.entry_size
        leftover = len(data) % size
        # the blob must contain a whole number of fixed-size entries
        assert leftover == 0, (len(data), size, leftover)
        self._data = data
        self._lgt = len(data) // size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-width entries: byte offset is a plain multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from a revision to the end is supported
        is_truncation = (
            isinstance(i, slice) and i.stop == -1 and i.step is None
        )
        if not is_truncation:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        start = i.start
        self._check_index(start)
        self._stripnodes(start)
        if start < self._lgt:
            # the cut lands inside the on-disk portion: trim it and drop
            # every in-memory appended entry as well
            self._data = self._data[: start * self.entry_size]
            self._lgt = start
            self._extra = []
        else:
            self._extra = self._extra[: start - self._lgt]
587 589
588 590
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to a persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no previous on-disk data was provided: nothing to append to
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the cached state is consumed: a new `update_nodemap_data` call is
        # required before producing the next incremental update
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
631 633
632 634
class InlinedIndexObject(BaseIndexObject):
    """Index object for "inline" revlogs, where each index entry is
    immediately followed by its revision data chunk in the same file."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the interleaved data and return the number of entries

        When *lgt* is not None, also fill ``self._offsets`` (of size *lgt*)
        with the byte offset of each entry.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # read the 4-byte length field following the 8-byte offset field
            # to know how much chunk data to skip after this entry
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            # we must land exactly on the end of the buffer
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from some revision to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets are irregular (entries are interleaved with data), so they
        # were precomputed by _inline_scan
        return self._offsets[i]
674 676
675 677
def parse_index2(data, inline, revlogv2=False):
    """Build an index object from raw index data.

    Returns an (index, cached-chunk) pair; the cached chunk is only
    meaningful for inline revlogs.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    index_type = IndexObject2 if revlogv2 else IndexObject
    return index_type(data), None
682 684
683 685
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; there is no inline cache data."""
    index = IndexChangelogV2(data)
    return index, None
686 688
687 689
class IndexObject2(IndexObject):
    # v2 entries additionally carry sidedata offset/length and per-entry
    # compression modes
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written to disk cannot be rewritten here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last unpacked field packs both compression modes:
        # bits 0-1 for the revision data, bits 2-3 for the sidedata
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold the two 2-bit compression modes back into a single field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 stores version information in a separate docket file
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
746 748
747 749
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): the `r` parameter is never used in this method;
        # presumably kept for signature compatibility — confirm before removing
        items = self.index_format.unpack(data)
        # fields 3 and 4 are not stored on disk for the changelog: both are
        # always equal to the revision number itself (see the asserts in
        # _pack_entry below)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # fields 3 and 4 must match the revision number; they are dropped
        # from the serialized form
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
766 768
767 769
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    index = PersistentNodeMapIndexObject(data)
    return index, None
771 773
772 774
def parse_dirstate(dmap, copymap, st):
    """Parse a dirstate-v1 blob *st*, filling *dmap* and *copymap* in place.

    Returns the two 20-byte parent nodeids stored in the header.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    entry_fmt = b">cllll"
    header_size = struct.calcsize(entry_fmt)
    offset = 40
    end = len(st)

    # the inner loop
    while offset < end:
        name_start = offset + header_size
        fields = _unpack(b">cllll", st[offset:name_start])  # a literal here is faster
        # fields[4] is the length of the filename that follows the header
        offset = name_start + fields[4]
        filename = st[name_start:offset]
        if b'\0' in filename:
            # a copy source is appended to the name after a NUL byte
            filename, source = filename.split(b'\0')
            copymap[filename] = source
        dmap[filename] = DirstateItem.from_v1_data(*fields[:4])
    return parents
792 794
793 795
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize *dmap* and *copymap* into a dirstate-v1 blob.

    *pl* is the pair of parent nodeids and *now* the current wall-clock
    time; entries whose mtime equals *now* get their mtime invalidated so
    a later `status` re-checks their content.
    """
    now = int(now)
    buf = stringio()
    emit = buf.write
    emit(b"".join(pl))
    for path, item in pycompat.iteritems(dmap):
        if item.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            item.set_possibly_dirty()

        if path in copymap:
            # copy records are stored as "<dest>\0<source>"
            path = b"%s\0%s" % (path, copymap[path])
        record = _pack(
            b">cllll",
            item.v1_state(),
            item.v1_mode(),
            item.v1_size(),
            item.v1_mtime(),
            len(path),
        )
        emit(record)
        emit(path)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now