dirstate-item: implement v1_state with higher level block...
marmoute
r48748:0d185f73 default
@@ -1,817 +1,809 @@
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
37 37 # a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
43 43 # a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
47 47 @attr.s(slots=True, init=False)
48 48 class DirstateItem(object):
49 49 """represent a dirstate entry
50 50
51 51 It contains:
52 52
53 53 - state (one of 'n', 'a', 'r', 'm')
54 54 - mode,
55 55 - size,
56 56 - mtime,
57 57 """
58 58
59 59 _wc_tracked = attr.ib()
60 60 _p1_tracked = attr.ib()
61 61 _p2_tracked = attr.ib()
62 62 # the three items above should probably be combined
63 63 #
64 64 # However it is unclear if they properly cover some of the most advanced
65 65 # merge cases. So we should probably wait for this to be settled.
66 66 _merged = attr.ib()
67 67 _clean_p1 = attr.ib()
68 68 _clean_p2 = attr.ib()
69 69 _possibly_dirty = attr.ib()
70 70 _mode = attr.ib()
71 71 _size = attr.ib()
72 72 _mtime = attr.ib()
73 73
74 74 def __init__(
75 75 self,
76 76 wc_tracked=False,
77 77 p1_tracked=False,
78 78 p2_tracked=False,
79 79 merged=False,
80 80 clean_p1=False,
81 81 clean_p2=False,
82 82 possibly_dirty=False,
83 83 parentfiledata=None,
84 84 ):
85 85 if merged and (clean_p1 or clean_p2):
86 86 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
87 87 raise error.ProgrammingError(msg)
88 88
89 89 self._wc_tracked = wc_tracked
90 90 self._p1_tracked = p1_tracked
91 91 self._p2_tracked = p2_tracked
92 92 self._merged = merged
93 93 self._clean_p1 = clean_p1
94 94 self._clean_p2 = clean_p2
95 95 self._possibly_dirty = possibly_dirty
96 96 if parentfiledata is None:
97 97 self._mode = None
98 98 self._size = None
99 99 self._mtime = None
100 100 else:
101 101 self._mode = parentfiledata[0]
102 102 self._size = parentfiledata[1]
103 103 self._mtime = parentfiledata[2]
104 104
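A minimal sketch (values made up) of building an entry for a clean tracked file whose stat data is already known:

    from mercurial.pure.parsers import DirstateItem

    item = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o644, 12, 1630000000),  # (mode, size, mtime)
    )
    assert item.state == b'n'  # tracked in p1 and the working copy
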
105 105 @classmethod
106 106 def new_added(cls):
107 107 """constructor to help legacy API to build a new "added" item
108 108
109 109 Should eventually be removed
110 110 """
111 111 instance = cls()
112 112 instance._wc_tracked = True
113 113 instance._p1_tracked = False
114 114 instance._p2_tracked = False
115 115 return instance
116 116
117 117 @classmethod
118 118 def new_merged(cls):
119 119 """constructor to help legacy API to build a new "merged" item
120 120
121 121 Should eventually be removed
122 122 """
123 123 instance = cls()
124 124 instance._wc_tracked = True
125 125 instance._p1_tracked = True # might not be True because of rename ?
126 126 instance._p2_tracked = True # might not be True because of rename ?
127 127 instance._merged = True
128 128 return instance
129 129
130 130 @classmethod
131 131 def new_from_p2(cls):
132 132 """constructor to help legacy API to build a new "from_p2" item
133 133
134 134 Should eventually be removed
135 135 """
136 136 instance = cls()
137 137 instance._wc_tracked = True
138 138 instance._p1_tracked = False # might actually be True
139 139 instance._p2_tracked = True
140 140 instance._clean_p2 = True
141 141 return instance
142 142
143 143 @classmethod
144 144 def new_possibly_dirty(cls):
145 145 """constructor to help legacy API to build a new "possibly_dirty" item
146 146
147 147 Should eventually be removed
148 148 """
149 149 instance = cls()
150 150 instance._wc_tracked = True
151 151 instance._p1_tracked = True
152 152 instance._possibly_dirty = True
153 153 return instance
154 154
155 155 @classmethod
156 156 def new_normal(cls, mode, size, mtime):
157 157 """constructor to help legacy API to build a new "normal" item
158 158
159 159 Should eventually be removed
160 160 """
161 161 assert size != FROM_P2
162 162 assert size != NONNORMAL
163 163 instance = cls()
164 164 instance._wc_tracked = True
165 165 instance._p1_tracked = True
166 166 instance._mode = mode
167 167 instance._size = size
168 168 instance._mtime = mtime
169 169 return instance
170 170
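As a hedged sketch, these legacy constructors map directly onto v1 states; for example, a freshly added file:

    from mercurial.pure.parsers import NONNORMAL, DirstateItem

    item = DirstateItem.new_added()
    assert item.added
    assert item.state == b'a'
    assert item.size == NONNORMAL  # added files have no meaningful size
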
171 171 @classmethod
172 172 def from_v1_data(cls, state, mode, size, mtime):
173 173 """Build a new DirstateItem object from V1 data
174 174
175 175 Since the dirstate-v1 format is frozen, the signature of this function
176 176 is not expected to change, unlike the __init__ one.
177 177 """
178 178 if state == b'm':
179 179 return cls.new_merged()
180 180 elif state == b'a':
181 181 return cls.new_added()
182 182 elif state == b'r':
183 183 instance = cls()
184 184 instance._wc_tracked = False
185 185 if size == NONNORMAL:
186 186 instance._merged = True
187 187 instance._p1_tracked = (
188 188 True # might not be True because of rename ?
189 189 )
190 190 instance._p2_tracked = (
191 191 True # might not be True because of rename ?
192 192 )
193 193 elif size == FROM_P2:
194 194 instance._clean_p2 = True
195 195 instance._p1_tracked = (
196 196 False # We actually don't know (file history)
197 197 )
198 198 instance._p2_tracked = True
199 199 else:
200 200 instance._p1_tracked = True
201 201 return instance
202 202 elif state == b'n':
203 203 if size == FROM_P2:
204 204 return cls.new_from_p2()
205 205 elif size == NONNORMAL:
206 206 return cls.new_possibly_dirty()
207 207 elif mtime == AMBIGUOUS_TIME:
208 208 instance = cls.new_normal(mode, size, 42)
209 209 instance._mtime = None
210 210 instance._possibly_dirty = True
211 211 return instance
212 212 else:
213 213 return cls.new_normal(mode, size, mtime)
214 214 else:
215 215 raise RuntimeError(b'unknown state: %s' % state)
216 216
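A sketch of the round trip this constructor guarantees for a file pulled in from the second parent (sentinel values as defined at the top of this module):

    from mercurial.pure.parsers import FROM_P2, DirstateItem

    item = DirstateItem.from_v1_data(b'n', 0, FROM_P2, 0)
    assert item.from_p2              # decoded into the semantic flag
    assert item.v1_state() == b'n'   # re-encoded losslessly
    assert item.v1_size() == FROM_P2
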
217 217 def set_possibly_dirty(self):
218 218 """Mark a file as "possibly dirty"
219 219
220 220 This means the next status call will have to actually check its content
221 221 to make sure it is correct.
222 222 """
223 223 self._possibly_dirty = True
224 224
225 225 def set_untracked(self):
226 226 """mark a file as untracked in the working copy
227 227
228 228 This will ultimately be called by commands like `hg remove`.
229 229 """
230 230 # backup the previous state (useful for merge)
231 231 self._wc_tracked = False
232 232 self._mode = None
233 233 self._size = None
234 234 self._mtime = None
235 235
236 236 @property
237 237 def mode(self):
238 238 return self.v1_mode()
239 239
240 240 @property
241 241 def size(self):
242 242 return self.v1_size()
243 243
244 244 @property
245 245 def mtime(self):
246 246 return self.v1_mtime()
247 247
248 248 @property
249 249 def state(self):
250 250 """
251 251 States are:
252 252 n normal
253 253 m needs merging
254 254 r marked for removal
255 255 a marked for addition
256 256
257 257 XXX This "state" is a bit obscure and mostly a direct expression of the
258 258 dirstatev1 format. It would make sense to ultimately deprecate it in
259 259 favor of the more "semantic" attributes.
260 260 """
261 261 return self.v1_state()
262 262
263 263 @property
264 264 def tracked(self):
265 265 """True is the file is tracked in the working copy"""
266 266 return self._wc_tracked
267 267
268 268 @property
269 269 def added(self):
270 270 """True if the file has been added"""
271 271 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
272 272
273 273 @property
274 274 def merged(self):
275 275 """True if the file has been merged
276 276
277 277 Should only be set if a merge is in progress in the dirstate
278 278 """
279 279 return self._wc_tracked and self._merged
280 280
281 281 @property
282 282 def from_p2(self):
283 283 """True if the file have been fetched from p2 during the current merge
284 284
285 285 This is only True if the file is currently tracked.
286 286
287 287 Should only be set if a merge is in progress in the dirstate
288 288 """
289 289 if not self._wc_tracked:
290 290 return False
291 291 return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)
292 292
293 293 @property
294 294 def from_p2_removed(self):
295 295 """True if the file has been removed, but was "from_p2" initially
296 296
297 297 This property seems like an abstraction leakage and should probably be
298 298 dealt with in this class (or maybe the dirstatemap) directly.
299 299 """
300 300 return self.removed and self._clean_p2
301 301
302 302 @property
303 303 def removed(self):
304 304 """True if the file has been removed"""
305 305 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
306 306
307 307 @property
308 308 def merged_removed(self):
309 309 """True if the file has been removed, but was "merged" initially
310 310
311 311 This property seems like an abstraction leakage and should probably be
312 312 dealt with in this class (or maybe the dirstatemap) directly.
313 313 """
314 314 return self.removed and self._merged
315 315
316 316 @property
317 317 def dm_nonnormal(self):
318 318 """True is the entry is non-normal in the dirstatemap sense
319 319
320 320 There is no reason for any code but the dirstatemap one to use this.
321 321 """
322 322 return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME
323 323
324 324 @property
325 325 def dm_otherparent(self):
326 326 """True is the entry is `otherparent` in the dirstatemap sense
327 327
328 328 There is no reason for any code but the dirstatemap one to use this.
329 329 """
330 330 return self.v1_size() == FROM_P2
331 331
332 332 def v1_state(self):
333 333 """return a "state" suitable for v1 serialization"""
334 334 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
335 335 # the object has no state to record, this is -currently-
336 336 # unsupported
337 337 raise RuntimeError('untracked item')
338 elif not self._wc_tracked:
338 elif self.removed:
339 339 return b'r'
340 elif self._merged:
340 elif self.merged:
341 341 return b'm'
342 elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
342 elif self.added:
343 343 return b'a'
344 elif self._clean_p2 and self._wc_tracked:
344 else:
345 345 return b'n'
346 elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
347 return b'n'
348 elif self._possibly_dirty:
349 return b'n'
350 elif self._wc_tracked:
351 return b'n'
352 else:
353 raise RuntimeError('unreachable')
354 346
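The rewrite above replaces the chain of low-level attribute tests with one block per semantic property; a quick sketch exercising each branch (constructors from earlier in this class):

    from mercurial.pure.parsers import DirstateItem

    assert DirstateItem.from_v1_data(b'r', 0, 0, 0).v1_state() == b'r'
    assert DirstateItem.new_merged().v1_state() == b'm'
    assert DirstateItem.new_added().v1_state() == b'a'
    assert DirstateItem.new_possibly_dirty().v1_state() == b'n'
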
355 347 def v1_mode(self):
356 348 """return a "mode" suitable for v1 serialization"""
357 349 return self._mode if self._mode is not None else 0
358 350
359 351 def v1_size(self):
360 352 """return a "size" suitable for v1 serialization"""
361 353 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
362 354 # the object has no state to record, this is -currently-
363 355 # unsupported
364 356 raise RuntimeError('untracked item')
365 357 elif not self._wc_tracked:
366 358 # File was deleted
367 359 if self._merged:
368 360 return NONNORMAL
369 361 elif self._clean_p2:
370 362 return FROM_P2
371 363 else:
372 364 return 0
373 365 elif self._merged:
374 366 return FROM_P2
375 367 elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
376 368 # Added
377 369 return NONNORMAL
378 370 elif self._clean_p2 and self._wc_tracked:
379 371 return FROM_P2
380 372 elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
381 373 return FROM_P2
382 374 elif self._possibly_dirty:
383 375 if self._size is None:
384 376 return NONNORMAL
385 377 else:
386 378 return self._size
387 379 elif self._wc_tracked:
388 380 return self._size
389 381 else:
390 382 raise RuntimeError('unreachable')
391 383
392 384 def v1_mtime(self):
393 385 """return a "mtime" suitable for v1 serialization"""
394 386 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
395 387 # the object has no state to record, this is -currently-
396 388 # unsupported
397 389 raise RuntimeError('untracked item')
398 390 elif not self._wc_tracked:
399 391 return 0
400 392 elif self._possibly_dirty:
401 393 return AMBIGUOUS_TIME
402 394 elif self._merged:
403 395 return AMBIGUOUS_TIME
404 396 elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
405 397 return AMBIGUOUS_TIME
406 398 elif self._clean_p2 and self._wc_tracked:
407 399 return AMBIGUOUS_TIME
408 400 elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
409 401 return AMBIGUOUS_TIME
410 402 elif self._wc_tracked:
411 403 if self._mtime is None:
412 404 return 0
413 405 else:
414 406 return self._mtime
415 407 else:
416 408 raise RuntimeError('unreachable')
417 409
418 410 def need_delay(self, now):
419 411 """True if the stored mtime would be ambiguous with the current time"""
420 412 return self.v1_state() == b'n' and self.v1_mtime() == now
421 413
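A sketch (timestamp made up) of the ambiguity check: a file written in the same second as the dirstate cannot be trusted by mtime alone:

    from mercurial.pure.parsers import DirstateItem

    now = 1630000000
    item = DirstateItem.new_normal(0o644, 5, now)
    assert item.need_delay(now)          # same second: ambiguous
    assert not item.need_delay(now + 1)  # a later write is safe
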
422 414
423 415 def gettype(q):
424 416 return int(q & 0xFFFF)
425 417
426 418
427 419 class BaseIndexObject(object):
428 420 # Can I be passed to an algorithm implemented in Rust?
429 421 rust_ext_compat = 0
430 422 # Format of an index entry according to Python's `struct` language
431 423 index_format = revlog_constants.INDEX_ENTRY_V1
432 424 # Size of a C unsigned long long int, platform independent
433 425 big_int_size = struct.calcsize(b'>Q')
434 426 # Size of a C long int, platform independent
435 427 int_size = struct.calcsize(b'>i')
436 428 # An empty index entry, used as a default value to be overridden, or nullrev
437 429 null_item = (
438 430 0,
439 431 0,
440 432 0,
441 433 -1,
442 434 -1,
443 435 -1,
444 436 -1,
445 437 sha1nodeconstants.nullid,
446 438 0,
447 439 0,
448 440 revlog_constants.COMP_MODE_INLINE,
449 441 revlog_constants.COMP_MODE_INLINE,
450 442 )
451 443
452 444 @util.propertycache
453 445 def entry_size(self):
454 446 return self.index_format.size
455 447
456 448 @property
457 449 def nodemap(self):
458 450 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
459 451 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
460 452 return self._nodemap
461 453
462 454 @util.propertycache
463 455 def _nodemap(self):
464 456 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
465 457 for r in range(0, len(self)):
466 458 n = self[r][7]
467 459 nodemap[n] = r
468 460 return nodemap
469 461
470 462 def has_node(self, node):
471 463 """return True if the node exist in the index"""
472 464 return node in self._nodemap
473 465
474 466 def rev(self, node):
475 467 """return a revision for a node
476 468
477 469 If the node is unknown, raise a RevlogError"""
478 470 return self._nodemap[node]
479 471
480 472 def get_rev(self, node):
481 473 """return a revision for a node
482 474
483 475 If the node is unknown, return None"""
484 476 return self._nodemap.get(node)
485 477
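A minimal runnable sketch of the lookup API, assuming a Mercurial checkout on sys.path and using parse_index2 (defined further down); even an empty index knows the null node:

    from mercurial.node import sha1nodeconstants
    from mercurial.pure import parsers

    idx, _cache = parsers.parse_index2(b'', inline=False)
    assert idx.has_node(sha1nodeconstants.nullid)
    assert idx.rev(sha1nodeconstants.nullid) == -1   # nullrev
    assert idx.get_rev(b'\x01' * 20) is None         # unknown node: None
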
486 478 def _stripnodes(self, start):
487 479 if '_nodemap' in vars(self):
488 480 for r in range(start, len(self)):
489 481 n = self[r][7]
490 482 del self._nodemap[n]
491 483
492 484 def clearcaches(self):
493 485 self.__dict__.pop('_nodemap', None)
494 486
495 487 def __len__(self):
496 488 return self._lgt + len(self._extra)
497 489
498 490 def append(self, tup):
499 491 if '_nodemap' in vars(self):
500 492 self._nodemap[tup[7]] = len(self)
501 493 data = self._pack_entry(len(self), tup)
502 494 self._extra.append(data)
503 495
504 496 def _pack_entry(self, rev, entry):
505 497 assert entry[8] == 0
506 498 assert entry[9] == 0
507 499 return self.index_format.pack(*entry[:8])
508 500
509 501 def _check_index(self, i):
510 502 if not isinstance(i, int):
511 503 raise TypeError(b"expecting int indexes")
512 504 if i < 0 or i >= len(self):
513 505 raise IndexError
514 506
515 507 def __getitem__(self, i):
516 508 if i == -1:
517 509 return self.null_item
518 510 self._check_index(i)
519 511 if i >= self._lgt:
520 512 data = self._extra[i - self._lgt]
521 513 else:
522 514 index = self._calculate_index(i)
523 515 data = self._data[index : index + self.entry_size]
524 516 r = self._unpack_entry(i, data)
525 517 if self._lgt and i == 0:
526 518 offset = revlogutils.offset_type(0, gettype(r[0]))
527 519 r = (offset,) + r[1:]
528 520 return r
529 521
530 522 def _unpack_entry(self, rev, data):
531 523 r = self.index_format.unpack(data)
532 524 r = r + (
533 525 0,
534 526 0,
535 527 revlog_constants.COMP_MODE_INLINE,
536 528 revlog_constants.COMP_MODE_INLINE,
537 529 )
538 530 return r
539 531
540 532 def pack_header(self, header):
541 533 """pack header information as binary"""
542 534 v_fmt = revlog_constants.INDEX_HEADER
543 535 return v_fmt.pack(header)
544 536
545 537 def entry_binary(self, rev):
546 538 """return the raw binary string representing a revision"""
547 539 entry = self[rev]
548 540 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
549 541 if rev == 0:
550 542 p = p[revlog_constants.INDEX_HEADER.size :]
551 543 return p
552 544
553 545
554 546 class IndexObject(BaseIndexObject):
555 547 def __init__(self, data):
556 548 assert len(data) % self.entry_size == 0, (
557 549 len(data),
558 550 self.entry_size,
559 551 len(data) % self.entry_size,
560 552 )
561 553 self._data = data
562 554 self._lgt = len(data) // self.entry_size
563 555 self._extra = []
564 556
565 557 def _calculate_index(self, i):
566 558 return i * self.entry_size
567 559
568 560 def __delitem__(self, i):
569 561 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
570 562 raise ValueError(b"deleting slices only supports a:-1 with step 1")
571 563 i = i.start
572 564 self._check_index(i)
573 565 self._stripnodes(i)
574 566 if i < self._lgt:
575 567 self._data = self._data[: i * self.entry_size]
576 568 self._lgt = i
577 569 self._extra = []
578 570 else:
579 571 self._extra = self._extra[: i - self._lgt]
580 572
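A sketch of the only supported deletion form, del index[i:-1], on an index holding a single appended entry (node id made up):

    from mercurial.pure import parsers

    idx, _cache = parsers.parse_index2(b'', inline=False)
    # a v1 entry: offset_flags, comp_len, uncomp_len, base, link, p1, p2,
    # node, plus the two sidedata fields that must be zero for v1
    idx.append((0, 0, 0, -1, -1, -1, -1, b'\x01' * 20, 0, 0))
    assert len(idx) == 1
    del idx[0:-1]  # truncate everything from rev 0 onwards
    assert len(idx) == 0
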
581 573
582 574 class PersistentNodeMapIndexObject(IndexObject):
583 575 """a Debug oriented class to test persistent nodemap
584 576
585 577 We need a simple Python object to test the API and higher-level behavior. See
586 578 the Rust implementation for more serious usage. This should be used only
587 579 through the dedicated `devel.persistent-nodemap` config.
588 580 """
589 581
590 582 def nodemap_data_all(self):
591 583 """Return bytes containing a full serialization of a nodemap
592 584
593 585 The nodemap should be valid for the full set of revisions in the
594 586 index."""
595 587 return nodemaputil.persistent_data(self)
596 588
597 589 def nodemap_data_incremental(self):
598 590 """Return bytes containing a incremental update to persistent nodemap
599 591
600 592 This contains the data for an append-only update of the data provided
601 593 in the last call to `update_nodemap_data`.
602 594 """
603 595 if self._nm_root is None:
604 596 return None
605 597 docket = self._nm_docket
606 598 changed, data = nodemaputil.update_persistent_data(
607 599 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
608 600 )
609 601
610 602 self._nm_root = self._nm_max_idx = self._nm_docket = None
611 603 return docket, changed, data
612 604
613 605 def update_nodemap_data(self, docket, nm_data):
614 606 """provide full block of persisted binary data for a nodemap
615 607
616 608 The data are expected to come from disk. See `nodemap_data_all` for a
617 609 producer of such data."""
618 610 if nm_data is not None:
619 611 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
620 612 if self._nm_root:
621 613 self._nm_docket = docket
622 614 else:
623 615 self._nm_root = self._nm_max_idx = self._nm_docket = None
624 616
625 617
626 618 class InlinedIndexObject(BaseIndexObject):
627 619 def __init__(self, data, inline=0):
628 620 self._data = data
629 621 self._lgt = self._inline_scan(None)
630 622 self._inline_scan(self._lgt)
631 623 self._extra = []
632 624
633 625 def _inline_scan(self, lgt):
634 626 off = 0
635 627 if lgt is not None:
636 628 self._offsets = [0] * lgt
637 629 count = 0
638 630 while off <= len(self._data) - self.entry_size:
639 631 start = off + self.big_int_size
640 632 (s,) = struct.unpack(
641 633 b'>i',
642 634 self._data[start : start + self.int_size],
643 635 )
644 636 if lgt is not None:
645 637 self._offsets[count] = off
646 638 count += 1
647 639 off += self.entry_size + s
648 640 if off != len(self._data):
649 641 raise ValueError(b"corrupted data")
650 642 return count
651 643
652 644 def __delitem__(self, i):
653 645 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
654 646 raise ValueError(b"deleting slices only supports a:-1 with step 1")
655 647 i = i.start
656 648 self._check_index(i)
657 649 self._stripnodes(i)
658 650 if i < self._lgt:
659 651 self._offsets = self._offsets[:i]
660 652 self._lgt = i
661 653 self._extra = []
662 654 else:
663 655 self._extra = self._extra[: i - self._lgt]
664 656
665 657 def _calculate_index(self, i):
666 658 return self._offsets[i]
667 659
668 660
669 661 def parse_index2(data, inline, revlogv2=False):
670 662 if not inline:
671 663 cls = IndexObject2 if revlogv2 else IndexObject
672 664 return cls(data), None
673 665 cls = InlinedIndexObject
674 666 return cls(data, inline), (0, data)
675 667
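A hedged sketch of the inline layout this scan walks: each index entry is followed by its revision chunk, whose length sits in the 4-byte field after the initial 8-byte offset, so one entry with a zero-length chunk parses cleanly (node id made up):

    from mercurial.pure import parsers
    from mercurial.revlogutils import constants as revlog_constants

    # comp_len (second field) of 0 means no revision chunk follows
    entry = revlog_constants.INDEX_ENTRY_V1.pack(
        0, 0, 0, -1, -1, -1, -1, b'\x01' * 20
    )
    idx, _cache = parsers.parse_index2(entry, inline=True)
    assert len(idx) == 1
    assert idx[0][7] == b'\x01' * 20  # the stored node id
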
676 668
677 669 def parse_index_cl_v2(data):
678 670 return IndexChangelogV2(data), None
679 671
680 672
681 673 class IndexObject2(IndexObject):
682 674 index_format = revlog_constants.INDEX_ENTRY_V2
683 675
684 676 def replace_sidedata_info(
685 677 self,
686 678 rev,
687 679 sidedata_offset,
688 680 sidedata_length,
689 681 offset_flags,
690 682 compression_mode,
691 683 ):
692 684 """
693 685 Replace an existing index entry's sidedata offset and length with new
694 686 ones.
695 687 This cannot be used outside of the context of sidedata rewriting,
696 688 inside the transaction that creates the revision `rev`.
697 689 """
698 690 if rev < 0:
699 691 raise KeyError
700 692 self._check_index(rev)
701 693 if rev < self._lgt:
702 694 msg = b"cannot rewrite entries outside of this transaction"
703 695 raise KeyError(msg)
704 696 else:
705 697 entry = list(self[rev])
706 698 entry[0] = offset_flags
707 699 entry[8] = sidedata_offset
708 700 entry[9] = sidedata_length
709 701 entry[11] = compression_mode
710 702 entry = tuple(entry)
711 703 new = self._pack_entry(rev, entry)
712 704 self._extra[rev - self._lgt] = new
713 705
714 706 def _unpack_entry(self, rev, data):
715 707 data = self.index_format.unpack(data)
716 708 entry = data[:10]
717 709 data_comp = data[10] & 3
718 710 sidedata_comp = (data[10] & (3 << 2)) >> 2
719 711 return entry + (data_comp, sidedata_comp)
720 712
721 713 def _pack_entry(self, rev, entry):
722 714 data = entry[:10]
723 715 data_comp = entry[10] & 3
724 716 sidedata_comp = (entry[11] & 3) << 2
725 717 data += (data_comp | sidedata_comp,)
726 718
727 719 return self.index_format.pack(*data)
728 720
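The two compression modes share one byte: the data mode in bits 0-1 and the sidedata mode in bits 2-3. A pure-arithmetic sketch of the packing above (mode values made up):

    data_comp, sidedata_comp = 1, 2  # any 2-bit mode values
    packed = (data_comp & 3) | ((sidedata_comp & 3) << 2)
    assert packed & 3 == data_comp             # recovered in _unpack_entry
    assert (packed >> 2) & 3 == sidedata_comp  # recovered in _unpack_entry
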
729 721 def entry_binary(self, rev):
730 722 """return the raw binary string representing a revision"""
731 723 entry = self[rev]
732 724 return self._pack_entry(rev, entry)
733 725
734 726 def pack_header(self, header):
735 727 """pack header information as binary"""
736 728 msg = 'version header should go in the docket, not the index: %d'
737 729 msg %= header
738 730 raise error.ProgrammingError(msg)
739 731
740 732
741 733 class IndexChangelogV2(IndexObject2):
742 734 index_format = revlog_constants.INDEX_ENTRY_CL_V2
743 735
744 736 def _unpack_entry(self, rev, data, r=True):
745 737 items = self.index_format.unpack(data)
746 738 entry = items[:3] + (rev, rev) + items[3:8]
747 739 data_comp = items[8] & 3
748 740 sidedata_comp = (items[8] >> 2) & 3
749 741 return entry + (data_comp, sidedata_comp)
750 742
751 743 def _pack_entry(self, rev, entry):
752 744 assert entry[3] == rev, entry[3]
753 745 assert entry[4] == rev, entry[4]
754 746 data = entry[:3] + entry[5:10]
755 747 data_comp = entry[10] & 3
756 748 sidedata_comp = (entry[11] & 3) << 2
757 749 data += (data_comp | sidedata_comp,)
758 750 return self.index_format.pack(*data)
759 751
760 752
761 753 def parse_index_devel_nodemap(data, inline):
762 754 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
763 755 return PersistentNodeMapIndexObject(data), None
764 756
765 757
766 758 def parse_dirstate(dmap, copymap, st):
767 759 parents = [st[:20], st[20:40]]
768 760 # dereference fields so they will be local in loop
769 761 format = b">cllll"
770 762 e_size = struct.calcsize(format)
771 763 pos1 = 40
772 764 l = len(st)
773 765
774 766 # the inner loop
775 767 while pos1 < l:
776 768 pos2 = pos1 + e_size
777 769 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
778 770 pos1 = pos2 + e[4]
779 771 f = st[pos2:pos1]
780 772 if b'\0' in f:
781 773 f, c = f.split(b'\0')
782 774 copymap[f] = c
783 775 dmap[f] = DirstateItem.from_v1_data(*e[:4])
784 776 return parents
785 777
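A runnable sketch (file names and parent ids made up) of the v1 format this parses: two 20-byte parents, then >cllll records, each followed by the file name, with copies encoded as name\0source:

    import struct
    from mercurial.pure import parsers

    f = b'copied\x00origin'  # a copied file and its copy source
    raw = b'\x11' * 20 + b'\x22' * 20
    raw += struct.pack(b'>cllll', b'n', 0o644, 5, 0, len(f)) + f
    dmap, copymap = {}, {}
    p1, p2 = parsers.parse_dirstate(dmap, copymap, raw)
    assert (p1, p2) == (b'\x11' * 20, b'\x22' * 20)
    assert copymap[b'copied'] == b'origin'
    assert dmap[b'copied'].state == b'n'
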
786 778
787 779 def pack_dirstate(dmap, copymap, pl, now):
788 780 now = int(now)
789 781 cs = stringio()
790 782 write = cs.write
791 783 write(b"".join(pl))
792 784 for f, e in pycompat.iteritems(dmap):
793 785 if e.need_delay(now):
794 786 # The file was last modified "simultaneously" with the current
795 787 # write to dirstate (i.e. within the same second for file-
796 788 # systems with a granularity of 1 sec). This commonly happens
797 789 # for at least a couple of files on 'update'.
798 790 # The user could change the file without changing its size
799 791 # within the same second. Invalidate the file's mtime in
800 792 # dirstate, forcing future 'status' calls to compare the
801 793 # contents of the file if the size is the same. This prevents
802 794 # mistakenly treating such files as clean.
803 795 e.set_possibly_dirty()
804 796
805 797 if f in copymap:
806 798 f = b"%s\0%s" % (f, copymap[f])
807 799 e = _pack(
808 800 b">cllll",
809 801 e.v1_state(),
810 802 e.v1_mode(),
811 803 e.v1_size(),
812 804 e.v1_mtime(),
813 805 len(f),
814 806 )
815 807 write(e)
816 808 write(f)
817 809 return cs.getvalue()
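
And a hedged round trip through both functions (timestamp made up), showing that an mtime equal to now comes back invalidated rather than trusted:

    from mercurial.pure import parsers

    now = 1630000000
    dmap = {b'a': parsers.DirstateItem.new_normal(0o644, 1, now)}
    raw = parsers.pack_dirstate(dmap, {}, [b'\x00' * 20] * 2, now)
    dmap2 = {}
    parsers.parse_dirstate(dmap2, {}, raw)
    assert dmap2[b'a'].mtime == parsers.AMBIGUOUS_TIME  # forced re-check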