##// END OF EJS Templates
dirstate-item: implement `v1_size` with higher level block...
marmoute -
r48749:dbf2d038 default
parent child Browse files
Show More
@@ -1,809 +1,799 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
37 37 # a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
43 43 # a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge case. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # "merged" and "clean relative to a parent" are mutually exclusive
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # the special "size" markers must never be stored as a real size
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                # mtime is unknown: build a normal item with a placeholder
                # mtime, then immediately clear it and flag it dirty
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        if not self._wc_tracked:
            return False
        return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._merged

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self.merged:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.merged_removed:
            return NONNORMAL
        elif self.from_p2_removed:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.merged:
            return FROM_P2
        elif self.added:
            return NONNORMAL
        elif self.from_p2:
            return FROM_P2
        elif self._possibly_dirty:
            # the size might be unknown for a "possibly dirty" entry
            return self._size if self._size is not None else NONNORMAL
        else:
            return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif not self._wc_tracked:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self._merged:
            return AMBIGUOUS_TIME
        elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._clean_p2 and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
            return AMBIGUOUS_TIME
        elif self._wc_tracked:
            if self._mtime is None:
                return 0
            else:
                return self._mtime
        else:
            raise RuntimeError('unreachable')

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
413 403
414 404
def gettype(q):
    """return the revlog entry type stored in the low 16 bits of *q*"""
    type_mask = 0xFFFF
    return int(q & type_mask)
417 407
418 408
class BaseIndexObject(object):
    # Can I be passed to an algorithme implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size, in bytes, of one serialized index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for compatibility; emits a warning
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping from all index entries;
        # entry[7] holds the node id
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop nodemap entries for every revision >= start, but only when
        # the nodemap cache has actually been built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # forget cached data (currently only the lazily-built nodemap)
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # entries read from disk plus entries appended in memory
        return self._lgt + len(self._extra)

    def append(self, tup):
        """add a new entry (already in tuple form) at the end of the index"""
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        """serialize one index entry into its binary v1 form"""
        # sidedata offset/length (fields 8 and 9) are not supported in v1
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        """validate that `i` is a usable revision number for this index"""
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        # -1 is nullrev and maps to the canonical empty entry
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # in-memory entries live past the on-disk ones
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # recombine rev 0's offset (always 0) with its type bits
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        # deserialize a binary entry and pad it to the 12-field tuple
        # shape shared with the v2 index (no sidedata, inline compression)
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0's on-disk form drops the bytes where the header lives
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
544 534
545 535
class IndexObject(BaseIndexObject):
    """revlog index backed by a separate (non-inline) index file"""

    def __init__(self, data):
        # data must contain a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # entries are fixed-size, so the offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from `i` to the end is supported (strip)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating within the on-disk part also drops all
            # in-memory entries (they come after it)
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
572 562
573 563
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing a incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        # no persisted state to build an increment from
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # consume the pending state: each increment can be emitted only once
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
616 606
617 607
class InlinedIndexObject(BaseIndexObject):
    """revlog index whose entries are interleaved with revision data"""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second scan records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the inline data and return the number of entries found

        When `lgt` is not None, also fill self._offsets with the byte
        offset of each entry. Entries are variable-sized because each one
        is followed by its (compressed) revision data.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # `s` is the length of the revision data following this entry
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from `i` to the end is supported (strip)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were precomputed by _inline_scan
        return self._offsets[i]
659 649
660 650
def parse_index2(data, inline, revlogv2=False):
    """build an index object from raw data

    Returns a ``(index, cached_chunk)`` pair; the cached chunk is only
    meaningful for inline revlogs.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    index_cls = IndexObject2 if revlogv2 else IndexObject
    return index_cls(data), None
667 657
668 658
def parse_index_cl_v2(data):
    """build a changelog-v2 index object from raw data"""
    return IndexChangelogV2(data), None
671 661
672 662
class IndexObject2(IndexObject):
    """revlog-v2 index: entries carry sidedata and compression info"""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # on-disk entries are immutable; only in-memory ones can change
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        # field 10 packs both compression modes: data mode in the low two
        # bits, sidedata mode in the next two
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # mirror of _unpack_entry: fold the two compression modes into
        # a single byte-sized field
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 stores the version in a separate docket file, not the index
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
731 721
732 722
class IndexChangelogV2(IndexObject2):
    """changelog-v2 index: linkrev/parent fields are implicit (== rev)"""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # the changelog is linear: linkrev and p1 slots are the rev itself
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # the implicit fields must match the rev before being dropped
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
751 741
752 742
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
756 746
757 747
def parse_dirstate(dmap, copymap, st):
    """parse a dirstate-v1 blob `st`, filling `dmap` and `copymap`

    Returns the pair of parent nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    entry_fmt = b">cllll"
    entry_size = struct.calcsize(entry_fmt)
    total = len(st)
    pos = 40

    # each record is a fixed-size entry followed by the filename,
    # whose length is stored in the entry's last field
    while pos < total:
        entry_end = pos + entry_size
        fields = _unpack(b">cllll", st[pos:entry_end])  # a literal here is faster
        pos = entry_end + fields[4]
        fname = st[entry_end:pos]
        if b'\0' in fname:
            # a NUL separates the filename from its copy source
            fname, copysource = fname.split(b'\0')
            copymap[fname] = copysource
        dmap[fname] = DirstateItem.from_v1_data(*fields[:4])
    return parents
777 767
778 768
def pack_dirstate(dmap, copymap, pl, now):
    """serialize the dirstate to its v1 binary form

    `dmap` maps filename to DirstateItem, `copymap` maps filename to copy
    source, `pl` is the pair of parent nodes and `now` the current time
    (used to invalidate mtimes that would be ambiguous).
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # the copy source is appended to the filename, NUL-separated
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now