dirstate-item: implement `v1_mtime` with higher level block...
marmoute
r48750:508394e3 default
@@ -1,799 +1,792 @@
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
37 37 # a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
43 43 # a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
47 47 @attr.s(slots=True, init=False)
48 48 class DirstateItem(object):
49 49 """represent a dirstate entry
50 50
51 51 It contains:
52 52
53 53 - state (one of 'n', 'a', 'r', 'm')
54 54 - mode,
55 55 - size,
56 56 - mtime,
57 57 """
58 58
59 59 _wc_tracked = attr.ib()
60 60 _p1_tracked = attr.ib()
61 61 _p2_tracked = attr.ib()
62 62 # the three items above should probably be combined
63 63 #
64 64 # However it is unclear if they properly cover some of the most advanced
65 65 # merge cases. So we should probably wait for this to be settled.
66 66 _merged = attr.ib()
67 67 _clean_p1 = attr.ib()
68 68 _clean_p2 = attr.ib()
69 69 _possibly_dirty = attr.ib()
70 70 _mode = attr.ib()
71 71 _size = attr.ib()
72 72 _mtime = attr.ib()
73 73
74 74 def __init__(
75 75 self,
76 76 wc_tracked=False,
77 77 p1_tracked=False,
78 78 p2_tracked=False,
79 79 merged=False,
80 80 clean_p1=False,
81 81 clean_p2=False,
82 82 possibly_dirty=False,
83 83 parentfiledata=None,
84 84 ):
85 85 if merged and (clean_p1 or clean_p2):
86 86 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
87 87 raise error.ProgrammingError(msg)
88 88
89 89 self._wc_tracked = wc_tracked
90 90 self._p1_tracked = p1_tracked
91 91 self._p2_tracked = p2_tracked
92 92 self._merged = merged
93 93 self._clean_p1 = clean_p1
94 94 self._clean_p2 = clean_p2
95 95 self._possibly_dirty = possibly_dirty
96 96 if parentfiledata is None:
97 97 self._mode = None
98 98 self._size = None
99 99 self._mtime = None
100 100 else:
101 101 self._mode = parentfiledata[0]
102 102 self._size = parentfiledata[1]
103 103 self._mtime = parentfiledata[2]
104 104
105 105 @classmethod
106 106 def new_added(cls):
107 107 """constructor to help legacy API to build a new "added" item
108 108
109 109 Should eventually be removed
110 110 """
111 111 instance = cls()
112 112 instance._wc_tracked = True
113 113 instance._p1_tracked = False
114 114 instance._p2_tracked = False
115 115 return instance
116 116
117 117 @classmethod
118 118 def new_merged(cls):
119 119 """constructor to help legacy API to build a new "merged" item
120 120
121 121 Should eventually be removed
122 122 """
123 123 instance = cls()
124 124 instance._wc_tracked = True
125 125 instance._p1_tracked = True # might not be True because of rename?
126 126 instance._p2_tracked = True # might not be True because of rename?
127 127 instance._merged = True
128 128 return instance
129 129
130 130 @classmethod
131 131 def new_from_p2(cls):
132 132 """constructor to help legacy API to build a new "from_p2" item
133 133
134 134 Should eventually be removed
135 135 """
136 136 instance = cls()
137 137 instance._wc_tracked = True
138 138 instance._p1_tracked = False # might actually be True
139 139 instance._p2_tracked = True
140 140 instance._clean_p2 = True
141 141 return instance
142 142
143 143 @classmethod
144 144 def new_possibly_dirty(cls):
145 145 """constructor to help legacy API to build a new "possibly_dirty" item
146 146
147 147 Should eventually be removed
148 148 """
149 149 instance = cls()
150 150 instance._wc_tracked = True
151 151 instance._p1_tracked = True
152 152 instance._possibly_dirty = True
153 153 return instance
154 154
155 155 @classmethod
156 156 def new_normal(cls, mode, size, mtime):
157 157 """constructor to help legacy API to build a new "normal" item
158 158
159 159 Should eventually be removed
160 160 """
161 161 assert size != FROM_P2
162 162 assert size != NONNORMAL
163 163 instance = cls()
164 164 instance._wc_tracked = True
165 165 instance._p1_tracked = True
166 166 instance._mode = mode
167 167 instance._size = size
168 168 instance._mtime = mtime
169 169 return instance
170 170
171 171 @classmethod
172 172 def from_v1_data(cls, state, mode, size, mtime):
173 173 """Build a new DirstateItem object from V1 data
174 174
175 175 Since the dirstate-v1 format is frozen, the signature of this function
176 176 is not expected to change, unlike the __init__ one.
177 177 """
178 178 if state == b'm':
179 179 return cls.new_merged()
180 180 elif state == b'a':
181 181 return cls.new_added()
182 182 elif state == b'r':
183 183 instance = cls()
184 184 instance._wc_tracked = False
185 185 if size == NONNORMAL:
186 186 instance._merged = True
187 187 instance._p1_tracked = (
188 188 True # might not be True because of rename?
189 189 )
190 190 instance._p2_tracked = (
191 191 True # might not be True because of rename?
192 192 )
193 193 elif size == FROM_P2:
194 194 instance._clean_p2 = True
195 195 instance._p1_tracked = (
196 196 False # We actually don't know (file history)
197 197 )
198 198 instance._p2_tracked = True
199 199 else:
200 200 instance._p1_tracked = True
201 201 return instance
202 202 elif state == b'n':
203 203 if size == FROM_P2:
204 204 return cls.new_from_p2()
205 205 elif size == NONNORMAL:
206 206 return cls.new_possibly_dirty()
207 207 elif mtime == AMBIGUOUS_TIME:
208 208 instance = cls.new_normal(mode, size, 42)
209 209 instance._mtime = None
210 210 instance._possibly_dirty = True
211 211 return instance
212 212 else:
213 213 return cls.new_normal(mode, size, mtime)
214 214 else:
215 215 raise RuntimeError(b'unknown state: %s' % state)
216 216
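Review note: the special sizes and mtimes above route `b'n'` entries to dedicated constructors. A minimal sketch of that decoding (assuming this module imports as `mercurial.pure.parsers`; the mode/size/mtime values are illustrative):

```python
from mercurial.pure.parsers import (
    AMBIGUOUS_TIME,
    FROM_P2,
    NONNORMAL,
    DirstateItem,
)

# size == FROM_P2: the file was taken from the second parent during a merge
item = DirstateItem.from_v1_data(b'n', 0o644, FROM_P2, 1234)
assert item.from_p2

# size == NONNORMAL: content must be re-checked, so the mtime is ambiguous
item = DirstateItem.from_v1_data(b'n', 0o644, NONNORMAL, 1234)
assert item.v1_mtime() == AMBIGUOUS_TIME

# mtime == AMBIGUOUS_TIME: the size survives, the cached mtime does not
item = DirstateItem.from_v1_data(b'n', 0o644, 512, AMBIGUOUS_TIME)
assert item.v1_size() == 512 and item.v1_mtime() == AMBIGUOUS_TIME
```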
217 217 def set_possibly_dirty(self):
218 218 """Mark a file as "possibly dirty"
219 219
220 220 This means the next status call will have to actually check its content
221 221 to make sure it is correct.
222 222 """
223 223 self._possibly_dirty = True
224 224
225 225 def set_untracked(self):
226 226 """mark a file as untracked in the working copy
227 227
228 228 This will ultimately be called by commands like `hg remove`.
229 229 """
230 230 # backup the previous state (useful for merge)
231 231 self._wc_tracked = False
232 232 self._mode = None
233 233 self._size = None
234 234 self._mtime = None
235 235
236 236 @property
237 237 def mode(self):
238 238 return self.v1_mode()
239 239
240 240 @property
241 241 def size(self):
242 242 return self.v1_size()
243 243
244 244 @property
245 245 def mtime(self):
246 246 return self.v1_mtime()
247 247
248 248 @property
249 249 def state(self):
250 250 """
251 251 States are:
252 252 n normal
253 253 m needs merging
254 254 r marked for removal
255 255 a marked for addition
256 256
257 257 XXX This "state" is a bit obscure and mostly a direct expression of the
258 258 dirstatev1 format. It would make sense to ultimately deprecate it in
259 259 favor of the more "semantic" attributes.
260 260 """
261 261 return self.v1_state()
262 262
263 263 @property
264 264 def tracked(self):
265 265 """True is the file is tracked in the working copy"""
266 266 return self._wc_tracked
267 267
268 268 @property
269 269 def added(self):
270 270 """True if the file has been added"""
271 271 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
272 272
273 273 @property
274 274 def merged(self):
275 275 """True if the file has been merged
276 276
277 277 Should only be set if a merge is in progress in the dirstate
278 278 """
279 279 return self._wc_tracked and self._merged
280 280
281 281 @property
282 282 def from_p2(self):
283 283 """True if the file have been fetched from p2 during the current merge
284 284
285 285 This is only True if the file is currently tracked.
286 286
287 287 Should only be set if a merge is in progress in the dirstate
288 288 """
289 289 if not self._wc_tracked:
290 290 return False
291 291 return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)
292 292
293 293 @property
294 294 def from_p2_removed(self):
295 295 """True if the file has been removed, but was "from_p2" initially
296 296
297 297 This property seems like an abstraction leak and should probably be
298 298 dealt with in this class (or maybe the dirstatemap) directly.
299 299 """
300 300 return self.removed and self._clean_p2
301 301
302 302 @property
303 303 def removed(self):
304 304 """True if the file has been removed"""
305 305 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
306 306
307 307 @property
308 308 def merged_removed(self):
309 309 """True if the file has been removed, but was "merged" initially
310 310
311 311 This property seems like an abstraction leak and should probably be
312 312 dealt with in this class (or maybe the dirstatemap) directly.
313 313 """
314 314 return self.removed and self._merged
315 315
316 316 @property
317 317 def dm_nonnormal(self):
318 318 """True is the entry is non-normal in the dirstatemap sense
319 319
320 320 There is no reason for any code but the dirstatemap one to use this.
321 321 """
322 322 return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME
323 323
324 324 @property
325 325 def dm_otherparent(self):
326 326 """True is the entry is `otherparent` in the dirstatemap sense
327 327
328 328 There is no reason for any code but the dirstatemap one to use this.
329 329 """
330 330 return self.v1_size() == FROM_P2
331 331
332 332 def v1_state(self):
333 333 """return a "state" suitable for v1 serialization"""
334 334 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
335 335 # the object has no state to record, this is -currently-
336 336 # unsupported
337 337 raise RuntimeError('untracked item')
338 338 elif self.removed:
339 339 return b'r'
340 340 elif self.merged:
341 341 return b'm'
342 342 elif self.added:
343 343 return b'a'
344 344 else:
345 345 return b'n'
346 346
347 347 def v1_mode(self):
348 348 """return a "mode" suitable for v1 serialization"""
349 349 return self._mode if self._mode is not None else 0
350 350
351 351 def v1_size(self):
352 352 """return a "size" suitable for v1 serialization"""
353 353 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
354 354 # the object has no state to record, this is -currently-
355 355 # unsupported
356 356 raise RuntimeError('untracked item')
357 357 elif self.merged_removed:
358 358 return NONNORMAL
359 359 elif self.from_p2_removed:
360 360 return FROM_P2
361 361 elif self.removed:
362 362 return 0
363 363 elif self.merged:
364 364 return FROM_P2
365 365 elif self.added:
366 366 return NONNORMAL
367 367 elif self.from_p2:
368 368 return FROM_P2
369 369 elif self._possibly_dirty:
370 370 return self._size if self._size is not None else NONNORMAL
371 371 else:
372 372 return self._size
373 373
374 374 def v1_mtime(self):
375 375 """return a "mtime" suitable for v1 serialization"""
376 376 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
377 377 # the object has no state to record, this is -currently-
378 378 # unsupported
379 379 raise RuntimeError('untracked item')
380 elif not self._wc_tracked:
380 elif self.removed:
381 381 return 0
382 382 elif self._possibly_dirty:
383 383 return AMBIGUOUS_TIME
384 elif self._merged:
385 return AMBIGUOUS_TIME
386 elif not (self._p1_tracked or self._p2_tracked) and self._wc_tracked:
384 elif self.merged:
387 385 return AMBIGUOUS_TIME
388 elif self._clean_p2 and self._wc_tracked:
389 return AMBIGUOUS_TIME
390 elif not self._p1_tracked and self._p2_tracked and self._wc_tracked:
386 elif self.added:
391 387 return AMBIGUOUS_TIME
392 elif self._wc_tracked:
393 if self._mtime is None:
394 return 0
388 elif self.from_p2:
389 return AMBIGUOUS_TIME
395 390 else:
396 return self._mtime
397 else:
398 raise RuntimeError('unreachable')
391 return self._mtime if self._mtime is not None else 0
399 392
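Review note: this hunk is the change under review; the flat flag tests are replaced by the semantic properties (`removed`, `merged`, `added`, `from_p2`). A standalone sketch (simplified flag tuples rather than the real class) that brute-forces every combination to check the before/after formulations agree:

```python
from itertools import product

AMBIGUOUS_TIME = -1

def v1_mtime_before(p1, p2, wc, merged, clean_p2, dirty, mtime):
    # the flat flag checks removed by this patch
    if not (p1 or p2 or wc):
        raise RuntimeError('untracked item')
    elif not wc:
        return 0
    elif dirty or merged:
        return AMBIGUOUS_TIME
    elif not (p1 or p2) and wc:
        return AMBIGUOUS_TIME
    elif clean_p2 and wc:
        return AMBIGUOUS_TIME
    elif not p1 and p2 and wc:
        return AMBIGUOUS_TIME
    else:
        return mtime if mtime is not None else 0

def v1_mtime_after(p1, p2, wc, merged, clean_p2, dirty, mtime):
    # the same decision, phrased with the higher-level properties
    removed = not wc and (p1 or p2)
    added = wc and not (p1 or p2)
    from_p2 = wc and (clean_p2 or (not p1 and p2))
    if not (p1 or p2 or wc):
        raise RuntimeError('untracked item')
    elif removed:
        return 0
    elif dirty:
        return AMBIGUOUS_TIME
    elif wc and merged:
        return AMBIGUOUS_TIME
    elif added or from_p2:
        return AMBIGUOUS_TIME
    else:
        return mtime if mtime is not None else 0

for flags in product([False, True], repeat=6):
    args = flags + (1234,)
    results = []
    for fn in (v1_mtime_before, v1_mtime_after):
        try:
            results.append(fn(*args))
        except RuntimeError:
            results.append('untracked')
    assert results[0] == results[1], (args, results)
```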
400 393 def need_delay(self, now):
401 394 """True if the stored mtime would be ambiguous with the current time"""
402 395 return self.v1_state() == b'n' and self.v1_mtime() == now
403 396
404 397
405 398 def gettype(q):
406 399 return int(q & 0xFFFF)
407 400
408 401
409 402 class BaseIndexObject(object):
410 403 # Can I be passed to an algorithm implemented in Rust?
411 404 rust_ext_compat = 0
412 405 # Format of an index entry according to Python's `struct` language
413 406 index_format = revlog_constants.INDEX_ENTRY_V1
414 407 # Size of a C unsigned long long int, platform independent
415 408 big_int_size = struct.calcsize(b'>Q')
416 409 # Size of a C long int, platform independent
417 410 int_size = struct.calcsize(b'>i')
418 411 # An empty index entry, used as a default value to be overridden, or nullrev
419 412 null_item = (
420 413 0,
421 414 0,
422 415 0,
423 416 -1,
424 417 -1,
425 418 -1,
426 419 -1,
427 420 sha1nodeconstants.nullid,
428 421 0,
429 422 0,
430 423 revlog_constants.COMP_MODE_INLINE,
431 424 revlog_constants.COMP_MODE_INLINE,
432 425 )
433 426
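Review note: for orientation, the 12-tuple layout that `null_item` mirrors, inferred from the accessors in this file (`self[r][7]` for the node, `entry[8]`/`entry[9]` in the sidedata rewrite, `entry[10]`/`entry[11]` for compression); an annotation, not authoritative documentation:

```python
# Inferred index entry tuple layout:
#  [0] offset and flags          [1] compressed data length
#  [2] uncompressed data length  [3] delta base revision
#  [4] link revision             [5] first parent revision
#  [6] second parent revision    [7] node id
#  [8] sidedata offset           [9] sidedata compressed length
# [10] data compression mode    [11] sidedata compression mode
```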
434 427 @util.propertycache
435 428 def entry_size(self):
436 429 return self.index_format.size
437 430
438 431 @property
439 432 def nodemap(self):
440 433 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
441 434 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
442 435 return self._nodemap
443 436
444 437 @util.propertycache
445 438 def _nodemap(self):
446 439 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
447 440 for r in range(0, len(self)):
448 441 n = self[r][7]
449 442 nodemap[n] = r
450 443 return nodemap
451 444
452 445 def has_node(self, node):
453 446 """return True if the node exist in the index"""
454 447 return node in self._nodemap
455 448
456 449 def rev(self, node):
457 450 """return a revision for a node
458 451
459 452 If the node is unknown, raise a RevlogError"""
460 453 return self._nodemap[node]
461 454
462 455 def get_rev(self, node):
463 456 """return a revision for a node
464 457
465 458 If the node is unknown, return None"""
466 459 return self._nodemap.get(node)
467 460
468 461 def _stripnodes(self, start):
469 462 if '_nodemap' in vars(self):
470 463 for r in range(start, len(self)):
471 464 n = self[r][7]
472 465 del self._nodemap[n]
473 466
474 467 def clearcaches(self):
475 468 self.__dict__.pop('_nodemap', None)
476 469
477 470 def __len__(self):
478 471 return self._lgt + len(self._extra)
479 472
480 473 def append(self, tup):
481 474 if '_nodemap' in vars(self):
482 475 self._nodemap[tup[7]] = len(self)
483 476 data = self._pack_entry(len(self), tup)
484 477 self._extra.append(data)
485 478
486 479 def _pack_entry(self, rev, entry):
487 480 assert entry[8] == 0
488 481 assert entry[9] == 0
489 482 return self.index_format.pack(*entry[:8])
490 483
491 484 def _check_index(self, i):
492 485 if not isinstance(i, int):
493 486 raise TypeError(b"expecting int indexes")
494 487 if i < 0 or i >= len(self):
495 488 raise IndexError
496 489
497 490 def __getitem__(self, i):
498 491 if i == -1:
499 492 return self.null_item
500 493 self._check_index(i)
501 494 if i >= self._lgt:
502 495 data = self._extra[i - self._lgt]
503 496 else:
504 497 index = self._calculate_index(i)
505 498 data = self._data[index : index + self.entry_size]
506 499 r = self._unpack_entry(i, data)
507 500 if self._lgt and i == 0:
508 501 offset = revlogutils.offset_type(0, gettype(r[0]))
509 502 r = (offset,) + r[1:]
510 503 return r
511 504
512 505 def _unpack_entry(self, rev, data):
513 506 r = self.index_format.unpack(data)
514 507 r = r + (
515 508 0,
516 509 0,
517 510 revlog_constants.COMP_MODE_INLINE,
518 511 revlog_constants.COMP_MODE_INLINE,
519 512 )
520 513 return r
521 514
522 515 def pack_header(self, header):
523 516 """pack header information as binary"""
524 517 v_fmt = revlog_constants.INDEX_HEADER
525 518 return v_fmt.pack(header)
526 519
527 520 def entry_binary(self, rev):
528 521 """return the raw binary string representing a revision"""
529 522 entry = self[rev]
530 523 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
531 524 if rev == 0:
532 525 p = p[revlog_constants.INDEX_HEADER.size :]
533 526 return p
534 527
535 528
536 529 class IndexObject(BaseIndexObject):
537 530 def __init__(self, data):
538 531 assert len(data) % self.entry_size == 0, (
539 532 len(data),
540 533 self.entry_size,
541 534 len(data) % self.entry_size,
542 535 )
543 536 self._data = data
544 537 self._lgt = len(data) // self.entry_size
545 538 self._extra = []
546 539
547 540 def _calculate_index(self, i):
548 541 return i * self.entry_size
549 542
550 543 def __delitem__(self, i):
551 544 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
552 545 raise ValueError(b"deleting slices only supports a:-1 with step 1")
553 546 i = i.start
554 547 self._check_index(i)
555 548 self._stripnodes(i)
556 549 if i < self._lgt:
557 550 self._data = self._data[: i * self.entry_size]
558 551 self._lgt = i
559 552 self._extra = []
560 553 else:
561 554 self._extra = self._extra[: i - self._lgt]
562 555
563 556
564 557 class PersistentNodeMapIndexObject(IndexObject):
565 558 """a Debug oriented class to test persistent nodemap
566 559
567 560 We need a simple Python object to test the API and higher-level behavior. See
568 561 the Rust implementation for more serious usage. This should be used only
569 562 through the dedicated `devel.persistent-nodemap` config.
570 563 """
571 564
572 565 def nodemap_data_all(self):
573 566 """Return bytes containing a full serialization of a nodemap
574 567
575 568 The nodemap should be valid for the full set of revisions in the
576 569 index."""
577 570 return nodemaputil.persistent_data(self)
578 571
579 572 def nodemap_data_incremental(self):
580 573 """Return bytes containing a incremental update to persistent nodemap
581 574
582 575 This contains the data for an append-only update of the data provided
583 576 in the last call to `update_nodemap_data`.
584 577 """
585 578 if self._nm_root is None:
586 579 return None
587 580 docket = self._nm_docket
588 581 changed, data = nodemaputil.update_persistent_data(
589 582 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
590 583 )
591 584
592 585 self._nm_root = self._nm_max_idx = self._nm_docket = None
593 586 return docket, changed, data
594 587
595 588 def update_nodemap_data(self, docket, nm_data):
596 589 """provide full block of persisted binary data for a nodemap
597 590
598 591 The data are expected to come from disk. See `nodemap_data_all` for a
599 592 producer of such data."""
600 593 if nm_data is not None:
601 594 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
602 595 if self._nm_root:
603 596 self._nm_docket = docket
604 597 else:
605 598 self._nm_root = self._nm_max_idx = self._nm_docket = None
606 599
607 600
608 601 class InlinedIndexObject(BaseIndexObject):
609 602 def __init__(self, data, inline=0):
610 603 self._data = data
611 604 self._lgt = self._inline_scan(None)
612 605 self._inline_scan(self._lgt)
613 606 self._extra = []
614 607
615 608 def _inline_scan(self, lgt):
616 609 off = 0
617 610 if lgt is not None:
618 611 self._offsets = [0] * lgt
619 612 count = 0
620 613 while off <= len(self._data) - self.entry_size:
621 614 start = off + self.big_int_size
622 615 (s,) = struct.unpack(
623 616 b'>i',
624 617 self._data[start : start + self.int_size],
625 618 )
626 619 if lgt is not None:
627 620 self._offsets[count] = off
628 621 count += 1
629 622 off += self.entry_size + s
630 623 if off != len(self._data):
631 624 raise ValueError(b"corrupted data")
632 625 return count
633 626
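Review note: `_inline_scan` walks variable-length records: each starts with a fixed-size index entry whose compressed-data length sits `big_int_size` bytes in, and the record ends after that many trailing data bytes. A self-contained sketch of the same walk over synthetic records (hypothetical sizes, not the real v1 entry format):

```python
import struct

ENTRY_SIZE = 12  # hypothetical; the real value comes from index_format.size
LEN_OFFSET = 8   # offset of the 4-byte length field (big_int_size above)

def inline_offsets(data):
    # mirror _inline_scan: hop over entry + trailing inline data
    off, out = 0, []
    while off <= len(data) - ENTRY_SIZE:
        (s,) = struct.unpack(b'>i', data[off + LEN_OFFSET : off + LEN_OFFSET + 4])
        out.append(off)
        off += ENTRY_SIZE + s
    if off != len(data):
        raise ValueError(b"corrupted data")
    return out

def record(payload):
    return struct.pack(b'>8si', b'\x00' * 8, len(payload)) + payload

data = record(b'abc') + record(b'') + record(b'hello')
assert inline_offsets(data) == [0, 15, 27]
```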
634 627 def __delitem__(self, i):
635 628 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
636 629 raise ValueError(b"deleting slices only supports a:-1 with step 1")
637 630 i = i.start
638 631 self._check_index(i)
639 632 self._stripnodes(i)
640 633 if i < self._lgt:
641 634 self._offsets = self._offsets[:i]
642 635 self._lgt = i
643 636 self._extra = []
644 637 else:
645 638 self._extra = self._extra[: i - self._lgt]
646 639
647 640 def _calculate_index(self, i):
648 641 return self._offsets[i]
649 642
650 643
651 644 def parse_index2(data, inline, revlogv2=False):
652 645 if not inline:
653 646 cls = IndexObject2 if revlogv2 else IndexObject
654 647 return cls(data), None
655 648 cls = InlinedIndexObject
656 649 return cls(data, inline), (0, data)
657 650
658 651
659 652 def parse_index_cl_v2(data):
660 653 return IndexChangelogV2(data), None
661 654
662 655
663 656 class IndexObject2(IndexObject):
664 657 index_format = revlog_constants.INDEX_ENTRY_V2
665 658
666 659 def replace_sidedata_info(
667 660 self,
668 661 rev,
669 662 sidedata_offset,
670 663 sidedata_length,
671 664 offset_flags,
672 665 compression_mode,
673 666 ):
674 667 """
675 668 Replace an existing index entry's sidedata offset and length with new
676 669 ones.
677 670 This cannot be used outside of the context of sidedata rewriting,
678 671 inside the transaction that creates the revision `rev`.
679 672 """
680 673 if rev < 0:
681 674 raise KeyError
682 675 self._check_index(rev)
683 676 if rev < self._lgt:
684 677 msg = b"cannot rewrite entries outside of this transaction"
685 678 raise KeyError(msg)
686 679 else:
687 680 entry = list(self[rev])
688 681 entry[0] = offset_flags
689 682 entry[8] = sidedata_offset
690 683 entry[9] = sidedata_length
691 684 entry[11] = compression_mode
692 685 entry = tuple(entry)
693 686 new = self._pack_entry(rev, entry)
694 687 self._extra[rev - self._lgt] = new
695 688
696 689 def _unpack_entry(self, rev, data):
697 690 data = self.index_format.unpack(data)
698 691 entry = data[:10]
699 692 data_comp = data[10] & 3
700 693 sidedata_comp = (data[10] & (3 << 2)) >> 2
701 694 return entry + (data_comp, sidedata_comp)
702 695
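Review note: `_unpack_entry` and `_pack_entry` share a single byte between the two compression modes: the data mode lives in the low two bits, the sidedata mode in bits 2-3. The arithmetic, with made-up mode values:

```python
data_comp, sidedata_comp = 1, 2  # hypothetical mode values
packed = (data_comp & 3) | ((sidedata_comp & 3) << 2)
assert packed == 0b1001
assert packed & 3 == data_comp                    # data[10] & 3
assert (packed & (3 << 2)) >> 2 == sidedata_comp  # (data[10] & (3 << 2)) >> 2
```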
703 696 def _pack_entry(self, rev, entry):
704 697 data = entry[:10]
705 698 data_comp = entry[10] & 3
706 699 sidedata_comp = (entry[11] & 3) << 2
707 700 data += (data_comp | sidedata_comp,)
708 701
709 702 return self.index_format.pack(*data)
710 703
711 704 def entry_binary(self, rev):
712 705 """return the raw binary string representing a revision"""
713 706 entry = self[rev]
714 707 return self._pack_entry(rev, entry)
715 708
716 709 def pack_header(self, header):
717 710 """pack header information as binary"""
718 711 msg = 'version header should go in the docket, not the index: %d'
719 712 msg %= header
720 713 raise error.ProgrammingError(msg)
721 714
722 715
723 716 class IndexChangelogV2(IndexObject2):
724 717 index_format = revlog_constants.INDEX_ENTRY_CL_V2
725 718
726 719 def _unpack_entry(self, rev, data, r=True):
727 720 items = self.index_format.unpack(data)
728 721 entry = items[:3] + (rev, rev) + items[3:8]
729 722 data_comp = items[8] & 3
730 723 sidedata_comp = (items[8] >> 2) & 3
731 724 return entry + (data_comp, sidedata_comp)
732 725
733 726 def _pack_entry(self, rev, entry):
734 727 assert entry[3] == rev, entry[3]
735 728 assert entry[4] == rev, entry[4]
736 729 data = entry[:3] + entry[5:10]
737 730 data_comp = entry[10] & 3
738 731 sidedata_comp = (entry[11] & 3) << 2
739 732 data += (data_comp | sidedata_comp,)
740 733 return self.index_format.pack(*data)
741 734
742 735
743 736 def parse_index_devel_nodemap(data, inline):
744 737 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
745 738 return PersistentNodeMapIndexObject(data), None
746 739
747 740
748 741 def parse_dirstate(dmap, copymap, st):
749 742 parents = [st[:20], st[20:40]]
750 743 # dereference fields so they will be local in loop
751 744 format = b">cllll"
752 745 e_size = struct.calcsize(format)
753 746 pos1 = 40
754 747 l = len(st)
755 748
756 749 # the inner loop
757 750 while pos1 < l:
758 751 pos2 = pos1 + e_size
759 752 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
760 753 pos1 = pos2 + e[4]
761 754 f = st[pos2:pos1]
762 755 if b'\0' in f:
763 756 f, c = f.split(b'\0')
764 757 copymap[f] = c
765 758 dmap[f] = DirstateItem.from_v1_data(*e[:4])
766 759 return parents
767 760
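Review note: the v1 on-disk layout is two 20-byte parents followed by records of a `>cllll` header (state, mode, size, mtime, name length) and the name, with an optional `\0`-separated copy source. A round-trip sketch (assuming the module imports as `mercurial.pure.parsers`; values are illustrative):

```python
import struct

from mercurial.pure.parsers import parse_dirstate

parents = b'\x11' * 20 + b'\x22' * 20
name = b'a.txt'
record = struct.pack(b'>cllll', b'n', 0o644, 5, 1500000000, len(name)) + name

dmap, copymap = {}, {}
p1, p2 = parse_dirstate(dmap, copymap, parents + record)
assert p1 == b'\x11' * 20
assert dmap[name].v1_size() == 5
assert dmap[name].v1_mtime() == 1500000000
```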
768 761
769 762 def pack_dirstate(dmap, copymap, pl, now):
770 763 now = int(now)
771 764 cs = stringio()
772 765 write = cs.write
773 766 write(b"".join(pl))
774 767 for f, e in pycompat.iteritems(dmap):
775 768 if e.need_delay(now):
776 769 # The file was last modified "simultaneously" with the current
777 770 # write to dirstate (i.e. within the same second for file-
778 771 # systems with a granularity of 1 sec). This commonly happens
779 772 # for at least a couple of files on 'update'.
780 773 # The user could change the file without changing its size
781 774 # within the same second. Invalidate the file's mtime in
782 775 # dirstate, forcing future 'status' calls to compare the
783 776 # contents of the file if the size is the same. This prevents
784 777 # mistakenly treating such files as clean.
785 778 e.set_possibly_dirty()
786 779
787 780 if f in copymap:
788 781 f = b"%s\0%s" % (f, copymap[f])
789 782 e = _pack(
790 783 b">cllll",
791 784 e.v1_state(),
792 785 e.v1_mode(),
793 786 e.v1_size(),
794 787 e.v1_mtime(),
795 788 len(f),
796 789 )
797 790 write(e)
798 791 write(f)
799 792 return cs.getvalue()
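Review note: a sketch of that ambiguity guard in action (same assumed import path; `now` is illustrative). When an entry's mtime equals the write time, `pack_dirstate` downgrades it to "possibly dirty", so the serialized mtime becomes `AMBIGUOUS_TIME` and the next `status` compares contents:

```python
import struct

from mercurial.pure.parsers import (
    AMBIGUOUS_TIME,
    DirstateItem,
    pack_dirstate,
)

now = 1500000000
dmap = {b'f': DirstateItem.new_normal(0o644, 3, now)}
data = pack_dirstate(dmap, {}, [b'\x00' * 20, b'\x00' * 20], now)

# need_delay() fired: the in-memory entry was invalidated...
assert dmap[b'f'].v1_mtime() == AMBIGUOUS_TIME
# ...and the serialized record carries -1 instead of `now`.
state, mode, size, mtime, flen = struct.unpack(b'>cllll', data[40:57])
assert (state, mtime) == (b'n', AMBIGUOUS_TIME)
```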