dirstate-item: point out that `merged` is set only with p1_tracked...
marmoute
r48925:e2da3ec9 default
@@ -1,841 +1,842 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
28 28 stringio = pycompat.bytesio
29 29
30 30
31 31 _pack = struct.pack
32 32 _unpack = struct.unpack
33 33 _compress = zlib.compress
34 34 _decompress = zlib.decompress
35 35
36 36
37 37 # a special value used internally for `size` if the file comes from the other parent
38 38 FROM_P2 = -2
39 39
40 40 # a special value used internally for `size` if the file is modified/merged/added
41 41 NONNORMAL = -1
42 42
43 43 # a special value used internally for `time` if the time is ambiguous
44 44 AMBIGUOUS_TIME = -1
45 45
46 46
47 47 @attr.s(slots=True, init=False)
48 48 class DirstateItem(object):
49 49 """represent a dirstate entry
50 50
51 51 It holds multiple attributes
52 52
53 53 # about file tracking
54 54 - wc_tracked: is the file tracked by the working copy
55 55 - p1_tracked: is the file tracked in working copy first parent
56 56 - p2_tracked: is the file tracked in working copy second parent
57 57
58 58 # about possible merge actions related to this file
59 59 - clean_p1: merge picked the file content from p1
60 60 - clean_p2: merge picked the file content from p2
61 61 - merged: the file gathers changes from both sides.
62 62
63 63 # about the file state expected from p1 manifest:
64 64 - mode: the file mode in p1
65 65 - size: the file size in p1
66 66
67 67 # about the file state on disk last time we saw it:
68 68 - mtime: the last known clean mtime for the file.
69 69
70 70 The last three items (mode, size and mtime) can be None if no meaningful (or
71 71 trusted) value exists.
72 72
73 73 """
74 74
75 75 _wc_tracked = attr.ib()
76 76 _p1_tracked = attr.ib()
77 77 _p2_tracked = attr.ib()
78 78 # the three items above should probably be combined
79 79 #
80 80 # However it is unclear if they properly cover some of the most advanced
81 81 # merge cases. So we should probably wait for this to be settled.
82 82 _merged = attr.ib()
83 83 _clean_p1 = attr.ib()
84 84 _clean_p2 = attr.ib()
85 85 _possibly_dirty = attr.ib()
86 86 _mode = attr.ib()
87 87 _size = attr.ib()
88 88 _mtime = attr.ib()
89 89
90 90 def __init__(
91 91 self,
92 92 wc_tracked=False,
93 93 p1_tracked=False,
94 94 p2_tracked=False,
95 95 merged=False,
96 96 clean_p1=False,
97 97 clean_p2=False,
98 98 possibly_dirty=False,
99 99 parentfiledata=None,
100 100 ):
101 101 if merged and (clean_p1 or clean_p2):
102 102 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
103 103 raise error.ProgrammingError(msg)
104 104
105 assert not (merged and not p1_tracked)
105 106 self._wc_tracked = wc_tracked
106 107 self._p1_tracked = p1_tracked
107 108 self._p2_tracked = p2_tracked
108 109 self._merged = merged
109 110 self._clean_p1 = clean_p1
110 111 self._clean_p2 = clean_p2
111 112 self._possibly_dirty = possibly_dirty
112 113 if parentfiledata is None:
113 114 self._mode = None
114 115 self._size = None
115 116 self._mtime = None
116 117 else:
117 118 self._mode = parentfiledata[0]
118 119 self._size = parentfiledata[1]
119 120 self._mtime = parentfiledata[2]
120 121
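The assertion added above encodes the invariant this changeset's message points out: `merged` only makes sense together with `p1_tracked`. A minimal sketch of the two cases, assuming this module is importable as `mercurial.pure.parsers` (the import path is an assumption, not part of the change):

    # Sketch only; import path assumed.
    from mercurial.pure.parsers import DirstateItem

    # Valid: a merged file is, per the new invariant, also tracked in p1.
    item = DirstateItem(
        wc_tracked=True, p1_tracked=True, p2_tracked=True, merged=True
    )
    assert item.merged

    # Invalid: `merged` without `p1_tracked` now trips the assertion
    # (when Python runs with assertions enabled).
    try:
        DirstateItem(wc_tracked=True, merged=True)
    except AssertionError:
        print('merged requires p1_tracked')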
121 122 @classmethod
122 123 def new_added(cls):
123 124 """constructor to help legacy API to build a new "added" item
124 125
125 126 Should eventually be removed
126 127 """
127 128 instance = cls()
128 129 instance._wc_tracked = True
129 130 instance._p1_tracked = False
130 131 instance._p2_tracked = False
131 132 return instance
132 133
133 134 @classmethod
134 135 def new_merged(cls):
135 136 """constructor to help legacy API to build a new "merged" item
136 137
137 138 Should eventually be removed
138 139 """
139 140 instance = cls()
140 141 instance._wc_tracked = True
141 142 instance._p1_tracked = True # might not be True because of renames?
142 143 instance._p2_tracked = True # might not be True because of renames?
143 144 instance._merged = True
144 145 return instance
145 146
146 147 @classmethod
147 148 def new_from_p2(cls):
148 149 """constructor to help legacy API to build a new "from_p2" item
149 150
150 151 Should eventually be removed
151 152 """
152 153 instance = cls()
153 154 instance._wc_tracked = True
154 155 instance._p1_tracked = False # might actually be True
155 156 instance._p2_tracked = True
156 157 instance._clean_p2 = True
157 158 return instance
158 159
159 160 @classmethod
160 161 def new_possibly_dirty(cls):
161 162 """constructor to help legacy API to build a new "possibly_dirty" item
162 163
163 164 Should eventually be removed
164 165 """
165 166 instance = cls()
166 167 instance._wc_tracked = True
167 168 instance._p1_tracked = True
168 169 instance._possibly_dirty = True
169 170 return instance
170 171
171 172 @classmethod
172 173 def new_normal(cls, mode, size, mtime):
173 174 """constructor to help legacy API to build a new "normal" item
174 175
175 176 Should eventually be removed
176 177 """
177 178 assert size != FROM_P2
178 179 assert size != NONNORMAL
179 180 instance = cls()
180 181 instance._wc_tracked = True
181 182 instance._p1_tracked = True
182 183 instance._mode = mode
183 184 instance._size = size
184 185 instance._mtime = mtime
185 186 return instance
186 187
187 188 @classmethod
188 189 def from_v1_data(cls, state, mode, size, mtime):
189 190 """Build a new DirstateItem object from V1 data
190 191
191 192 Since the dirstate-v1 format is frozen, the signature of this function
192 193 is not expected to change, unlike the __init__ one.
193 194 """
194 195 if state == b'm':
195 196 return cls.new_merged()
196 197 elif state == b'a':
197 198 return cls.new_added()
198 199 elif state == b'r':
199 200 instance = cls()
200 201 instance._wc_tracked = False
201 202 if size == NONNORMAL:
202 203 instance._merged = True
203 204 instance._p1_tracked = (
204 205 True # might not be True because of renames?
205 206 )
206 207 instance._p2_tracked = (
207 208 True # might not be True because of renames?
208 209 )
209 210 elif size == FROM_P2:
210 211 instance._clean_p2 = True
211 212 instance._p1_tracked = (
212 213 False # We actually don't know (file history)
213 214 )
214 215 instance._p2_tracked = True
215 216 else:
216 217 instance._p1_tracked = True
217 218 return instance
218 219 elif state == b'n':
219 220 if size == FROM_P2:
220 221 return cls.new_from_p2()
221 222 elif size == NONNORMAL:
222 223 return cls.new_possibly_dirty()
223 224 elif mtime == AMBIGUOUS_TIME:
224 225 instance = cls.new_normal(mode, size, 42)
225 226 instance._mtime = None
226 227 instance._possibly_dirty = True
227 228 return instance
228 229 else:
229 230 return cls.new_normal(mode, size, mtime)
230 231 else:
231 232 raise RuntimeError(b'unknown state: %s' % state)
232 233
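For orientation, a small sketch of how a few V1 tuples travel through `from_v1_data`, using the sentinel constants defined at the top of the file (import path assumed as above):

    from mercurial.pure.parsers import DirstateItem, NONNORMAL, AMBIGUOUS_TIME

    # 'n' with a real mtime: a plain clean candidate.
    clean = DirstateItem.from_v1_data(b'n', 0o644, 12, 1700000000)
    assert clean.state == b'n' and clean.maybe_clean

    # 'n' with an ambiguous mtime: forced back through a content check.
    dirty = DirstateItem.from_v1_data(b'n', 0o644, 12, AMBIGUOUS_TIME)
    assert dirty.v1_mtime() == AMBIGUOUS_TIME

    # 'm' rebuilds a merged item, 'a' an added one.
    assert DirstateItem.from_v1_data(b'm', 0, NONNORMAL, 0).merged
    assert DirstateItem.from_v1_data(b'a', 0, NONNORMAL, 0).added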
233 234 def set_possibly_dirty(self):
234 235 """Mark a file as "possibly dirty"
235 236
236 237 This means the next status call will have to actually check its content
237 238 to make sure it is correct.
238 239 """
239 240 self._possibly_dirty = True
240 241
241 242 def set_clean(self, mode, size, mtime):
242 243 """mark a file as "clean" cancelling potential "possibly dirty call"
243 244
244 245 Note: this function is a descendant of `dirstate.normal` and is
245 246 currently expected to be called on "normal" entries only. There is no
246 247 reason for this not to change in the future, as long as the code is
247 248 updated to preserve the proper state of the non-normal files.
248 249 """
249 250 self._wc_tracked = True
250 251 self._p1_tracked = True
251 252 self._p2_tracked = False # this might be wrong
252 253 self._merged = False
253 254 self._clean_p2 = False
254 255 self._possibly_dirty = False
255 256 self._mode = mode
256 257 self._size = size
257 258 self._mtime = mtime
258 259
259 260 def set_tracked(self):
260 261 """mark a file as tracked in the working copy
261 262
262 263 This will ultimately be called by commands like `hg add`.
263 264 """
264 265 self._wc_tracked = True
265 266 # `set_tracked` is replacing various `normallookup` call. So we set
266 267 # "possibly dirty" to stay on the safe side.
267 268 #
268 269 # Consider dropping this in the future in favor of something less broad.
269 270 self._possibly_dirty = True
270 271
271 272 def set_untracked(self):
272 273 """mark a file as untracked in the working copy
273 274
274 275 This will ultimately be called by commands like `hg remove`.
275 276 """
276 277 self._wc_tracked = False
277 278 self._mode = None
278 279 self._size = None
279 280 self._mtime = None
280 281
281 282 def drop_merge_data(self):
282 283 """remove all "merge-only" from a DirstateItem
283 284
284 285 This is to be called by the dirstatemap code when the second parent is dropped
285 286 """
286 287 if not (self.merged or self.from_p2):
287 288 return
288 289 self._p1_tracked = self.merged # why is this not already properly set?
289 290
290 291 self._merged = False
291 292 self._clean_p1 = False
292 293 self._clean_p2 = False
293 294 self._p2_tracked = False
294 295 self._possibly_dirty = True
295 296 self._mode = None
296 297 self._size = None
297 298 self._mtime = None
298 299
299 300 @property
300 301 def mode(self):
301 302 return self.v1_mode()
302 303
303 304 @property
304 305 def size(self):
305 306 return self.v1_size()
306 307
307 308 @property
308 309 def mtime(self):
309 310 return self.v1_mtime()
310 311
311 312 @property
312 313 def state(self):
313 314 """
314 315 States are:
315 316 n normal
316 317 m needs merging
317 318 r marked for removal
318 319 a marked for addition
319 320
320 321 XXX This "state" is a bit obscure and mostly a direct expression of the
321 322 dirstatev1 format. It would make sense to ultimately deprecate it in
322 323 favor of the more "semantic" attributes.
323 324 """
324 325 if not self.any_tracked:
325 326 return b'?'
326 327 return self.v1_state()
327 328
328 329 @property
329 330 def tracked(self):
330 331 """True is the file is tracked in the working copy"""
331 332 return self._wc_tracked
332 333
333 334 @property
334 335 def any_tracked(self):
335 336 """True is the file is tracked anywhere (wc or parents)"""
336 337 return self._wc_tracked or self._p1_tracked or self._p2_tracked
337 338
338 339 @property
339 340 def added(self):
340 341 """True if the file has been added"""
341 342 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
342 343
343 344 @property
344 345 def maybe_clean(self):
345 346 """True if the file has a chance to be in the "clean" state"""
346 347 if not self._wc_tracked:
347 348 return False
348 349 elif self.added:
349 350 return False
350 351 elif self._merged:
351 352 return False
352 353 elif self._clean_p2:
353 354 return False
354 355 return True
355 356
356 357 @property
357 358 def merged(self):
358 359 """True if the file has been merged
359 360
360 361 Should only be set if a merge is in progress in the dirstate
361 362 """
362 363 return self._wc_tracked and self._merged
363 364
364 365 @property
365 366 def from_p2(self):
366 367 """True if the file have been fetched from p2 during the current merge
367 368
368 369 This is only True if the file is currently tracked.
369 370
370 371 Should only be set if a merge is in progress in the dirstate
371 372 """
372 373 if not self._wc_tracked:
373 374 return False
374 375 return self._clean_p2
375 376
376 377 @property
377 378 def removed(self):
378 379 """True if the file has been removed"""
379 380 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
380 381
381 382 def v1_state(self):
382 383 """return a "state" suitable for v1 serialization"""
383 384 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
384 385 # the object has no state to record, this is -currently-
385 386 # unsupported
386 387 raise RuntimeError('untracked item')
387 388 elif self.removed:
388 389 return b'r'
389 390 elif self.merged:
390 391 return b'm'
391 392 elif self.added:
392 393 return b'a'
393 394 else:
394 395 return b'n'
395 396
396 397 def v1_mode(self):
397 398 """return a "mode" suitable for v1 serialization"""
398 399 return self._mode if self._mode is not None else 0
399 400
400 401 def v1_size(self):
401 402 """return a "size" suitable for v1 serialization"""
402 403 if not self.any_tracked:
403 404 # the object has no state to record, this is -currently-
404 405 # unsupported
405 406 raise RuntimeError('untracked item')
406 407 elif self.removed and self._merged:
407 408 return NONNORMAL
408 409 elif self.removed and self._clean_p2:
409 410 return FROM_P2
410 411 elif self.removed:
411 412 return 0
412 413 elif self.merged:
413 414 return FROM_P2
414 415 elif self.added:
415 416 return NONNORMAL
416 417 elif self.from_p2:
417 418 return FROM_P2
418 419 elif self._possibly_dirty:
419 420 return self._size if self._size is not None else NONNORMAL
420 421 else:
421 422 return self._size
422 423
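A short sketch of how the sentinel sizes surface in V1 serialization (same assumed import path):

    from mercurial.pure.parsers import DirstateItem, FROM_P2, NONNORMAL

    assert DirstateItem.new_added().v1_size() == NONNORMAL   # added
    assert DirstateItem.new_merged().v1_size() == FROM_P2    # merged
    assert DirstateItem.new_from_p2().v1_size() == FROM_P2   # fetched from p2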
423 424 def v1_mtime(self):
424 425 """return a "mtime" suitable for v1 serialization"""
425 426 if not self.any_tracked:
426 427 # the object has no state to record, this is -currently-
427 428 # unsupported
428 429 raise RuntimeError('untracked item')
429 430 elif self.removed:
430 431 return 0
431 432 elif self._possibly_dirty:
432 433 return AMBIGUOUS_TIME
433 434 elif self.merged:
434 435 return AMBIGUOUS_TIME
435 436 elif self.added:
436 437 return AMBIGUOUS_TIME
437 438 elif self.from_p2:
438 439 return AMBIGUOUS_TIME
439 440 else:
440 441 return self._mtime if self._mtime is not None else 0
441 442
442 443 def need_delay(self, now):
443 444 """True if the stored mtime would be ambiguous with the current time"""
444 445 return self.v1_state() == b'n' and self.v1_mtime() == now
445 446
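A sketch of the race `need_delay` guards against: an mtime equal to the dirstate write time cannot prove the file is clean (same assumed import path):

    import time

    from mercurial.pure.parsers import DirstateItem

    now = int(time.time())
    item = DirstateItem.new_normal(0o644, 5, now)
    # Same second as the dirstate write: the mtime is ambiguous.
    assert item.need_delay(now)
    # One second later, the stored mtime can be trusted again.
    assert not item.need_delay(now + 1)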
446 447
447 448 def gettype(q):
448 449 return int(q & 0xFFFF)
449 450
450 451
451 452 class BaseIndexObject(object):
452 453 # Can this object be passed to an algorithm implemented in Rust?
453 454 rust_ext_compat = 0
454 455 # Format of an index entry according to Python's `struct` language
455 456 index_format = revlog_constants.INDEX_ENTRY_V1
456 457 # Size of a C unsigned long long int, platform independent
457 458 big_int_size = struct.calcsize(b'>Q')
458 459 # Size of a C long int, platform independent
459 460 int_size = struct.calcsize(b'>i')
460 461 # An empty index entry, used as a default value to be overridden, or nullrev
461 462 null_item = (
462 463 0,
463 464 0,
464 465 0,
465 466 -1,
466 467 -1,
467 468 -1,
468 469 -1,
469 470 sha1nodeconstants.nullid,
470 471 0,
471 472 0,
472 473 revlog_constants.COMP_MODE_INLINE,
473 474 revlog_constants.COMP_MODE_INLINE,
474 475 )
475 476
476 477 @util.propertycache
477 478 def entry_size(self):
478 479 return self.index_format.size
479 480
480 481 @property
481 482 def nodemap(self):
482 483 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
483 484 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
484 485 return self._nodemap
485 486
486 487 @util.propertycache
487 488 def _nodemap(self):
488 489 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
489 490 for r in range(0, len(self)):
490 491 n = self[r][7]
491 492 nodemap[n] = r
492 493 return nodemap
493 494
494 495 def has_node(self, node):
495 496 """return True if the node exist in the index"""
496 497 return node in self._nodemap
497 498
498 499 def rev(self, node):
499 500 """return a revision for a node
500 501
501 502 If the node is unknown, raise a RevlogError"""
502 503 return self._nodemap[node]
503 504
504 505 def get_rev(self, node):
505 506 """return a revision for a node
506 507
507 508 If the node is unknown, return None"""
508 509 return self._nodemap.get(node)
509 510
510 511 def _stripnodes(self, start):
511 512 if '_nodemap' in vars(self):
512 513 for r in range(start, len(self)):
513 514 n = self[r][7]
514 515 del self._nodemap[n]
515 516
516 517 def clearcaches(self):
517 518 self.__dict__.pop('_nodemap', None)
518 519
519 520 def __len__(self):
520 521 return self._lgt + len(self._extra)
521 522
522 523 def append(self, tup):
523 524 if '_nodemap' in vars(self):
524 525 self._nodemap[tup[7]] = len(self)
525 526 data = self._pack_entry(len(self), tup)
526 527 self._extra.append(data)
527 528
528 529 def _pack_entry(self, rev, entry):
529 530 assert entry[8] == 0
530 531 assert entry[9] == 0
531 532 return self.index_format.pack(*entry[:8])
532 533
533 534 def _check_index(self, i):
534 535 if not isinstance(i, int):
535 536 raise TypeError(b"expecting int indexes")
536 537 if i < 0 or i >= len(self):
537 538 raise IndexError
538 539
539 540 def __getitem__(self, i):
540 541 if i == -1:
541 542 return self.null_item
542 543 self._check_index(i)
543 544 if i >= self._lgt:
544 545 data = self._extra[i - self._lgt]
545 546 else:
546 547 index = self._calculate_index(i)
547 548 data = self._data[index : index + self.entry_size]
548 549 r = self._unpack_entry(i, data)
549 550 if self._lgt and i == 0:
550 551 offset = revlogutils.offset_type(0, gettype(r[0]))
551 552 r = (offset,) + r[1:]
552 553 return r
553 554
554 555 def _unpack_entry(self, rev, data):
555 556 r = self.index_format.unpack(data)
556 557 r = r + (
557 558 0,
558 559 0,
559 560 revlog_constants.COMP_MODE_INLINE,
560 561 revlog_constants.COMP_MODE_INLINE,
561 562 )
562 563 return r
563 564
564 565 def pack_header(self, header):
565 566 """pack header information as binary"""
566 567 v_fmt = revlog_constants.INDEX_HEADER
567 568 return v_fmt.pack(header)
568 569
569 570 def entry_binary(self, rev):
570 571 """return the raw binary string representing a revision"""
571 572 entry = self[rev]
572 573 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
573 574 if rev == 0:
574 575 p = p[revlog_constants.INDEX_HEADER.size :]
575 576 return p
576 577
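For reference, a standalone sketch of one v1-style index entry; the `>Qiiiiii20s` layout (offset/flags, compressed and uncompressed lengths, base rev, link rev, both parents, nodeid) is my reading of `INDEX_ENTRY_V1` and should be treated as an assumption, as is the padding to 64 bytes on disk:

    import struct

    INDEX_V1 = struct.Struct(b'>Qiiiiii20s')  # assumed layout, unpadded

    raw = INDEX_V1.pack(0, 11, 24, 0, 0, -1, -1, b'\x00' * 20)
    offset_flags, comp, uncomp, base, link, p1, p2, node = INDEX_V1.unpack(raw)
    assert (base, p1, p2) == (0, -1, -1)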
577 578
578 579 class IndexObject(BaseIndexObject):
579 580 def __init__(self, data):
580 581 assert len(data) % self.entry_size == 0, (
581 582 len(data),
582 583 self.entry_size,
583 584 len(data) % self.entry_size,
584 585 )
585 586 self._data = data
586 587 self._lgt = len(data) // self.entry_size
587 588 self._extra = []
588 589
589 590 def _calculate_index(self, i):
590 591 return i * self.entry_size
591 592
592 593 def __delitem__(self, i):
593 594 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
594 595 raise ValueError(b"deleting slices only supports a:-1 with step 1")
595 596 i = i.start
596 597 self._check_index(i)
597 598 self._stripnodes(i)
598 599 if i < self._lgt:
599 600 self._data = self._data[: i * self.entry_size]
600 601 self._lgt = i
601 602 self._extra = []
602 603 else:
603 604 self._extra = self._extra[: i - self._lgt]
604 605
605 606
606 607 class PersistentNodeMapIndexObject(IndexObject):
607 608 """a Debug oriented class to test persistent nodemap
608 609
609 610 We need a simple python object to test API and higher level behavior. See
610 611 the Rust implementation for more serious usage. This should be used only
611 612 through the dedicated `devel.persistent-nodemap` config.
612 613 """
613 614
614 615 def nodemap_data_all(self):
615 616 """Return bytes containing a full serialization of a nodemap
616 617
617 618 The nodemap should be valid for the full set of revisions in the
618 619 index."""
619 620 return nodemaputil.persistent_data(self)
620 621
621 622 def nodemap_data_incremental(self):
622 623 """Return bytes containing a incremental update to persistent nodemap
623 624
624 625 This contains the data for an append-only update of the data provided
625 626 in the last call to `update_nodemap_data`.
626 627 """
627 628 if self._nm_root is None:
628 629 return None
629 630 docket = self._nm_docket
630 631 changed, data = nodemaputil.update_persistent_data(
631 632 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
632 633 )
633 634
634 635 self._nm_root = self._nm_max_idx = self._nm_docket = None
635 636 return docket, changed, data
636 637
637 638 def update_nodemap_data(self, docket, nm_data):
638 639 """provide full block of persisted binary data for a nodemap
639 640
640 641 The data are expected to come from disk. See `nodemap_data_all` for a
641 642 producer of such data."""
642 643 if nm_data is not None:
643 644 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
644 645 if self._nm_root:
645 646 self._nm_docket = docket
646 647 else:
647 648 self._nm_root = self._nm_max_idx = self._nm_docket = None
648 649
649 650
650 651 class InlinedIndexObject(BaseIndexObject):
651 652 def __init__(self, data, inline=0):
652 653 self._data = data
653 654 self._lgt = self._inline_scan(None)
654 655 self._inline_scan(self._lgt)
655 656 self._extra = []
656 657
657 658 def _inline_scan(self, lgt):
658 659 off = 0
659 660 if lgt is not None:
660 661 self._offsets = [0] * lgt
661 662 count = 0
662 663 while off <= len(self._data) - self.entry_size:
663 664 start = off + self.big_int_size
664 665 (s,) = struct.unpack(
665 666 b'>i',
666 667 self._data[start : start + self.int_size],
667 668 )
668 669 if lgt is not None:
669 670 self._offsets[count] = off
670 671 count += 1
671 672 off += self.entry_size + s
672 673 if off != len(self._data):
673 674 raise ValueError(b"corrupted data")
674 675 return count
675 676
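A standalone sketch of the inline layout `_inline_scan` walks: each index entry is immediately followed by its data chunk, whose compressed length sits `big_int_size` bytes into the entry (the 64-byte entry size is the v1 value and an assumption here):

    import struct

    ENTRY_SIZE = 64                        # v1 index entry size (assumed)
    BIG_INT_SIZE = struct.calcsize(b'>Q')
    INT_SIZE = struct.calcsize(b'>i')

    def scan_offsets(data):
        """Yield the offset of each entry in an inline index."""
        off = 0
        while off <= len(data) - ENTRY_SIZE:
            start = off + BIG_INT_SIZE
            (chunk_len,) = struct.unpack(b'>i', data[start:start + INT_SIZE])
            yield off
            off += ENTRY_SIZE + chunk_len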
676 677 def __delitem__(self, i):
677 678 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
678 679 raise ValueError(b"deleting slices only supports a:-1 with step 1")
679 680 i = i.start
680 681 self._check_index(i)
681 682 self._stripnodes(i)
682 683 if i < self._lgt:
683 684 self._offsets = self._offsets[:i]
684 685 self._lgt = i
685 686 self._extra = []
686 687 else:
687 688 self._extra = self._extra[: i - self._lgt]
688 689
689 690 def _calculate_index(self, i):
690 691 return self._offsets[i]
691 692
692 693
693 694 def parse_index2(data, inline, revlogv2=False):
694 695 if not inline:
695 696 cls = IndexObject2 if revlogv2 else IndexObject
696 697 return cls(data), None
697 698 cls = InlinedIndexObject
698 699 return cls(data, inline), (0, data)
699 700
700 701
701 702 def parse_index_cl_v2(data):
702 703 return IndexChangelogV2(data), None
703 704
704 705
705 706 class IndexObject2(IndexObject):
706 707 index_format = revlog_constants.INDEX_ENTRY_V2
707 708
708 709 def replace_sidedata_info(
709 710 self,
710 711 rev,
711 712 sidedata_offset,
712 713 sidedata_length,
713 714 offset_flags,
714 715 compression_mode,
715 716 ):
716 717 """
717 718 Replace an existing index entry's sidedata offset and length with new
718 719 ones.
719 720 This cannot be used outside of the context of sidedata rewriting,
720 721 inside the transaction that creates the revision `rev`.
721 722 """
722 723 if rev < 0:
723 724 raise KeyError
724 725 self._check_index(rev)
725 726 if rev < self._lgt:
726 727 msg = b"cannot rewrite entries outside of this transaction"
727 728 raise KeyError(msg)
728 729 else:
729 730 entry = list(self[rev])
730 731 entry[0] = offset_flags
731 732 entry[8] = sidedata_offset
732 733 entry[9] = sidedata_length
733 734 entry[11] = compression_mode
734 735 entry = tuple(entry)
735 736 new = self._pack_entry(rev, entry)
736 737 self._extra[rev - self._lgt] = new
737 738
738 739 def _unpack_entry(self, rev, data):
739 740 data = self.index_format.unpack(data)
740 741 entry = data[:10]
741 742 data_comp = data[10] & 3
742 743 sidedata_comp = (data[10] & (3 << 2)) >> 2
743 744 return entry + (data_comp, sidedata_comp)
744 745
745 746 def _pack_entry(self, rev, entry):
746 747 data = entry[:10]
747 748 data_comp = entry[10] & 3
748 749 sidedata_comp = (entry[11] & 3) << 2
749 750 data += (data_comp | sidedata_comp,)
750 751
751 752 return self.index_format.pack(*data)
752 753
753 754 def entry_binary(self, rev):
754 755 """return the raw binary string representing a revision"""
755 756 entry = self[rev]
756 757 return self._pack_entry(rev, entry)
757 758
758 759 def pack_header(self, header):
759 760 """pack header information as binary"""
760 761 msg = 'version header should go in the docket, not the index: %d'
761 762 msg %= header
762 763 raise error.ProgrammingError(msg)
763 764
764 765
765 766 class IndexChangelogV2(IndexObject2):
766 767 index_format = revlog_constants.INDEX_ENTRY_CL_V2
767 768
768 769 def _unpack_entry(self, rev, data, r=True):
769 770 items = self.index_format.unpack(data)
770 771 entry = items[:3] + (rev, rev) + items[3:8]
771 772 data_comp = items[8] & 3
772 773 sidedata_comp = (items[8] >> 2) & 3
773 774 return entry + (data_comp, sidedata_comp)
774 775
775 776 def _pack_entry(self, rev, entry):
776 777 assert entry[3] == rev, entry[3]
777 778 assert entry[4] == rev, entry[4]
778 779 data = entry[:3] + entry[5:10]
779 780 data_comp = entry[10] & 3
780 781 sidedata_comp = (entry[11] & 3) << 2
781 782 data += (data_comp | sidedata_comp,)
782 783 return self.index_format.pack(*data)
783 784
784 785
785 786 def parse_index_devel_nodemap(data, inline):
786 787 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
787 788 return PersistentNodeMapIndexObject(data), None
788 789
789 790
790 791 def parse_dirstate(dmap, copymap, st):
791 792 parents = [st[:20], st[20:40]]
792 793 # dereference fields so they will be local in loop
793 794 format = b">cllll"
794 795 e_size = struct.calcsize(format)
795 796 pos1 = 40
796 797 l = len(st)
797 798
798 799 # the inner loop
799 800 while pos1 < l:
800 801 pos2 = pos1 + e_size
801 802 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
802 803 pos1 = pos2 + e[4]
803 804 f = st[pos2:pos1]
804 805 if b'\0' in f:
805 806 f, c = f.split(b'\0')
806 807 copymap[f] = c
807 808 dmap[f] = DirstateItem.from_v1_data(*e[:4])
808 809 return parents
809 810
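A standalone sketch of one V1 dirstate record as consumed above: after the 40 bytes of parent nodes, each entry is a `>cllll` header (state, mode, size, mtime, filename length) followed by the filename, with a copy source appended after a NUL:

    import struct

    fmt = struct.Struct(b'>cllll')
    name = b'copied.txt\x00source.txt'  # filename + NUL + copy source
    record = fmt.pack(b'n', 0o644, 12, 1700000000, len(name)) + name

    state, mode, size, mtime, flen = fmt.unpack(record[:fmt.size])
    f = record[fmt.size:fmt.size + flen]
    f, copy_source = f.split(b'\x00')
    assert (f, copy_source) == (b'copied.txt', b'source.txt')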
810 811
811 812 def pack_dirstate(dmap, copymap, pl, now):
812 813 now = int(now)
813 814 cs = stringio()
814 815 write = cs.write
815 816 write(b"".join(pl))
816 817 for f, e in pycompat.iteritems(dmap):
817 818 if e.need_delay(now):
818 819 # The file was last modified "simultaneously" with the current
819 820 # write to dirstate (i.e. within the same second for file-
820 821 # systems with a granularity of 1 sec). This commonly happens
821 822 # for at least a couple of files on 'update'.
822 823 # The user could change the file without changing its size
823 824 # within the same second. Invalidate the file's mtime in
824 825 # dirstate, forcing future 'status' calls to compare the
825 826 # contents of the file if the size is the same. This prevents
826 827 # mistakenly treating such files as clean.
827 828 e.set_possibly_dirty()
828 829
829 830 if f in copymap:
830 831 f = b"%s\0%s" % (f, copymap[f])
831 832 e = _pack(
832 833 b">cllll",
833 834 e.v1_state(),
834 835 e.v1_mode(),
835 836 e.v1_size(),
836 837 e.v1_mtime(),
837 838 len(f),
838 839 )
839 840 write(e)
840 841 write(f)
841 842 return cs.getvalue()
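Putting the two halves together, a hedged round-trip sketch (import path assumed as before):

    from mercurial.pure.parsers import (
        DirstateItem,
        pack_dirstate,
        parse_dirstate,
    )

    dmap = {b'a.txt': DirstateItem.new_normal(0o644, 3, 1700000000)}
    copymap = {}
    pl = [b'\x00' * 20, b'\x00' * 20]

    raw = pack_dirstate(dmap, copymap, pl, now=1700000042)
    dmap2, copymap2 = {}, {}
    parents = parse_dirstate(dmap2, copymap2, raw)
    assert parents == pl
    assert dmap2[b'a.txt'].state == b'n'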