##// END OF EJS Templates
dirstate-item: add more logic to `from_p2`...
marmoute -
r48747:97e9f3fd default
parent child Browse files
Show More
@@ -1,815 +1,817 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
# a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
# a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge case. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # the special `size` markers must never reach a "normal" entry
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                # build a normal entry, then drop the mtime again so that a
                # future `status` is forced to re-check the file content
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        # NOTE: the previous single-line implementation
        # (`return self._wc_tracked and self._clean_p2`) was left in place
        # ahead of this logic, making it unreachable dead code; it has been
        # removed.
        if not self._wc_tracked:
            return False
        # either explicitly marked as clean from p2, or only tracked in p2
        return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._merged

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return b'r'
        elif self._merged:
            return b'm'
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return b'a'
        elif self._clean_p2 and self._wc_tracked:
            return b'n'
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return b'n'
        elif self._possibly_dirty:
            return b'n'
        elif self._wc_tracked:
            return b'n'
        else:
            raise RuntimeError('unreachable')

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            # File was deleted
            if self._merged:
                return NONNORMAL
            elif self._clean_p2:
                return FROM_P2
            else:
                return 0
        elif self._merged:
            return FROM_P2
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            # Added
            return NONNORMAL
        elif self._clean_p2 and self._wc_tracked:
            return FROM_P2
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return FROM_P2
        elif self._possibly_dirty:
            if self._size is None:
                return NONNORMAL
            else:
                return self._size
        elif self._wc_tracked:
            return self._size
        else:
            raise RuntimeError('unreachable')

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self._merged:
            return AMBIGUOUS_TIME
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._clean_p2 and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._wc_tracked:
            if self._mtime is None:
                return 0
            else:
                return self._mtime
        else:
            raise RuntimeError('unreachable')

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
420 422
def gettype(q):
    """extract the revlog "type" bits (the low 16 bits) of an offset/flags field"""
    low_bits = q & 0xFFFF
    return int(low_bits)
423 425
424 426
class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size in bytes of one serialized index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for compatibility; warns on use
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping, seeded with the null node
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap, if it was built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        """drop the cached nodemap (it will be rebuilt lazily)"""
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # on-disk entries plus entries appended in memory
        return self._lgt + len(self._extra)

    def append(self, tup):
        """append a new index entry (a 12-item tuple) to the in-memory part"""
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries carry no sidedata, so sidedata offset/length must be 0
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        # validate `i` as an index into this object, mirroring the C module
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry appended in memory
            data = self._extra[i - self._lgt]
        else:
            # entry read from the on-disk data
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry's offset field overlays the version header,
            # so rebuild a clean (offset, type) value for revision 0
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad with sidedata (offset, length) and the two compression modes
        # so the tuple layout matches the v2 index entries
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the header bytes overlay the start of the first entry
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
550 552
551 553
class IndexObject(BaseIndexObject):
    """index object for a non-inline revlog (fixed-size entries, no data)"""

    def __init__(self, data):
        # the raw data must hold a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only stripping a tail `[start:-1]` of the index is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # strip reaches into the on-disk part: truncate it and drop
            # everything appended in memory
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # strip only affects in-memory entries
            self._extra = self._extra[: i - self._lgt]
578 580
579 581
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no previously loaded on-disk data to append to
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the loaded state is consumed by this call
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
622 624
623 625
class InlinedIndexObject(BaseIndexObject):
    """index object for an inline revlog, where revision data is interleaved
    with the index entries and entries therefore sit at variable offsets"""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second scan records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # walk the inlined data and return the number of entries found;
        # when `lgt` is not None, also fill self._offsets with each entry's
        # byte offset
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # `s` is the size of the revision data stored after this entry
            # (skipped over by the `off` update below)
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only stripping a tail `[start:-1]` of the index is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were recorded by _inline_scan
        return self._offsets[i]
665 667
666 668
def parse_index2(data, inline, revlogv2=False):
    """Turn raw index bytes into an index object.

    Returns an ``(index, cache)`` pair; the cache is only populated for
    inline indexes, where revision data lives in the same chunk.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
673 675
674 676
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; no inline cache is ever returned."""
    index = IndexChangelogV2(data)
    return index, None
677 679
678 680
class IndexObject2(IndexObject):
    """index object for the v2 entry format, which carries sidedata info"""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # on-disk entries are immutable within this object
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the two compression modes share one packed field:
        # bits 0-1 are the data mode, bits 2-3 the sidedata mode
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # re-combine the two compression modes into the shared field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 stores the version in the docket file, not in the index itself
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
737 739
738 740
class IndexChangelogV2(IndexObject2):
    """index object for the changelog-v2 entry format"""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): the extra `r` parameter is unused and differs from
        # the base class signature — confirm whether it can be dropped.
        items = self.index_format.unpack(data)
        # delta base and link rev are not stored on disk: they are always
        # `rev` itself for changelog-v2 (see the asserts in _pack_entry)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # base rev and link rev are implicit, so they must equal `rev`
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        # drop the implicit fields and fold the compression modes together
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
757 759
758 760
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    index = PersistentNodeMapIndexObject(data)
    return index, None
762 764
763 765
def parse_dirstate(dmap, copymap, st):
    """fill `dmap` and `copymap` from the raw dirstate-v1 bytes `st`

    Returns the list of the two parent nodes (the first 40 bytes of `st`).
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        # e[4] is the filename length; the filename follows the fixed fields
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # a NUL separates the filename from its copy source
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
783 785
784 786
def pack_dirstate(dmap, copymap, pl, now):
    """serialize dirstate map `dmap` (and `copymap`) with parents `pl`
    into dirstate-v1 bytes

    `now` is the current wall-clock time, used to invalidate mtimes that
    would be ambiguous with it.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # the copy source is appended to the filename, NUL-separated
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now