##// END OF EJS Templates
dirstate-item: implement `added` in a simpler way...
marmoute -
r48741:03f57915 default
parent child Browse files
Show More
@@ -1,815 +1,815 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
# a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
# a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # whether the working copy / first parent / second parent consider the
    # file tracked
    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three item above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge case. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    # cached stat information of the working-copy file (or None when unknown)
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """initialize a dirstate entry from its semantic flags

        ``parentfiledata``, when provided, is a ``(mode, size, mtime)``
        tuple of cached stat data; otherwise all three are left unset.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # the sentinel sizes are reserved and must not reach this constructor
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                # removed after a merge
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                # removed, but the file came from the second parent
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                # 42 is an arbitrary placeholder; the mtime is cleared and
                # the entry flagged possibly-dirty right below
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self.v1_state() == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self.v1_state() == b'n' and self.v1_size() == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self.v1_state() == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == NONNORMAL

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        # NOTE: branch order matters; the more specific combinations must be
        # tested before the generic `self._wc_tracked` fallback
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return b'r'
        elif self._merged:
            return b'm'
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return b'a'
        elif self._clean_p2 and self._wc_tracked:
            return b'n'
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return b'n'
        elif self._possibly_dirty:
            return b'n'
        elif self._wc_tracked:
            return b'n'
        else:
            raise RuntimeError('unreachable')

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            # File was deleted
            if self._merged:
                return NONNORMAL
            elif self._clean_p2:
                return FROM_P2
            else:
                return 0
        elif self._merged:
            return FROM_P2
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            # Added
            return NONNORMAL
        elif self._clean_p2 and self._wc_tracked:
            return FROM_P2
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return FROM_P2
        elif self._possibly_dirty:
            if self._size is None:
                return NONNORMAL
            else:
                return self._size
        elif self._wc_tracked:
            return self._size
        else:
            raise RuntimeError('unreachable')

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self._merged:
            return AMBIGUOUS_TIME
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._clean_p2 and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._wc_tracked:
            if self._mtime is None:
                return 0
            else:
                return self._mtime
        else:
            raise RuntimeError('unreachable')

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
419 419
420 420
def gettype(q):
    """Return the revision type flags stored in the low 16 bits of *q*."""
    low_bits = q & 0xFFFF
    return int(low_bits)
423 423
424 424
class BaseIndexObject(object):
    """Shared behavior for the pure-Python revlog index implementations.

    Subclasses provide ``_data`` (the raw index bytes), ``_lgt`` (the number
    of entries stored in ``_data``) and ``_calculate_index`` (byte offset of
    a given revision).  Entries appended at runtime live in ``_extra``.
    """

    # Can I be passed to an algorithme implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # size in bytes of one packed index entry (cached)
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for backward compatibility; warns once
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping; nullid always maps to nullrev
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap (if it was built)
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # invalidate the cached nodemap; it will be rebuilt on next access
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # on-disk entries plus entries appended in this session
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the cached nodemap in sync when adding a new revision
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries carry no sidedata; those fields must be zero
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        # validate that ``i`` is an in-range integer revision number
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        # -1 is the conventional "null revision" index
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # reset the offset of revision 0 to zero while keeping its type
            # flags (on disk, that field overlaps the version header —
            # see entry_binary)
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        # v1 entries have no sidedata info: pad with zero offset/length and
        # inline compression modes
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the first entry shares its leading bytes with the index header
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
550 550
551 551
552 552 class IndexObject(BaseIndexObject):
553 553 def __init__(self, data):
554 554 assert len(data) % self.entry_size == 0, (
555 555 len(data),
556 556 self.entry_size,
557 557 len(data) % self.entry_size,
558 558 )
559 559 self._data = data
560 560 self._lgt = len(data) // self.entry_size
561 561 self._extra = []
562 562
563 563 def _calculate_index(self, i):
564 564 return i * self.entry_size
565 565
566 566 def __delitem__(self, i):
567 567 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
568 568 raise ValueError(b"deleting slices only supports a:-1 with step 1")
569 569 i = i.start
570 570 self._check_index(i)
571 571 self._stripnodes(i)
572 572 if i < self._lgt:
573 573 self._data = self._data[: i * self.entry_size]
574 574 self._lgt = i
575 575 self._extra = []
576 576 else:
577 577 self._extra = self._extra[: i - self._lgt]
578 578
579 579
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing a incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no previously-loaded nodemap state to update against
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the loaded state is consumed by this call; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
622 622
623 623
class InlinedIndexObject(BaseIndexObject):
    """Revlog index whose entries are interleaved with the revision data.

    Because each entry is followed by its (variable-length) compressed chunk,
    entry positions cannot be computed; a scan builds ``_offsets`` instead.
    """

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # walk the buffer entry by entry; each step skips the fixed-size
        # entry plus the compressed chunk length read from the entry itself
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # the 4-byte compressed length follows the 8-byte offset field
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only ``del index[i:-1]`` (strip from ``i`` to the end) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # positions were recorded during the inline scan
        return self._offsets[i]
665 665
666 666
def parse_index2(data, inline, revlogv2=False):
    """Parse revlog index ``data`` into an index object.

    Returns a ``(index, cache)`` pair; ``cache`` is ``(0, data)`` for inline
    revlogs and ``None`` otherwise.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
673 673
674 674
def parse_index_cl_v2(data):
    """Parse a changelog-v2 index; the cache slot is always ``None``."""
    index = IndexChangelogV2(data)
    return index, None
677 677
678 678
class IndexObject2(IndexObject):
    """Revlog-v2 index: v2 entries carry sidedata and compression-mode info."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written to disk are immutable
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        # the last packed field multiplexes both compression modes:
        # bits 0-1 = data compression, bits 2-3 = sidedata compression
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # fold the two 2-bit compression modes back into a single field
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
737 737
738 738
class IndexChangelogV2(IndexObject2):
    """Changelog flavor of the v2 index.

    The on-disk format omits the base-rev and link-rev fields; they are
    reconstructed as ``rev`` itself on unpack (positions 3 and 4 of the
    in-memory tuple) and asserted back to ``rev`` on pack.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # synthesize base-rev and link-rev as the revision itself
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # base-rev and link-rev are implicit; they must equal ``rev``
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
757 757
758 758
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    index = PersistentNodeMapIndexObject(data)
    return index, None
762 762
763 763
def parse_dirstate(dmap, copymap, st):
    """parse a dirstate-v1 serialization contained in ``st``

    Fills ``dmap`` (filename -> DirstateItem) and ``copymap``
    (filename -> copy source) in place and returns the two parent
    nodeids stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        # each record is a fixed-size header followed by the filename,
        # whose length is stored in the header's last field (e[4])
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # a copy source is embedded after a NUL byte in the filename
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
783 783
784 784
def pack_dirstate(dmap, copymap, pl, now):
    """serialize dirstate entries into the v1 binary format

    ``dmap`` maps filename -> DirstateItem, ``copymap`` maps filename ->
    copy source, ``pl`` is the pair of parent nodeids and ``now`` is the
    current time, used to invalidate mtimes that would be ambiguous.
    Returns the serialized bytes.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # the copy source is appended to the filename after a NUL byte
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now