##// END OF EJS Templates
dirstate-item: replace a call to new_normal...
marmoute -
r48974:79ebbe19 default
parent child Browse files
Show More
@@ -1,743 +1,746 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It holds multiple attributes

    # about file tracking
    - wc_tracked: is the file tracked by the working copy
    - p1_tracked: is the file tracked in working copy first parent
    - p2_info: the file has been involved in some merge operation. Either
               because it was actually merged, or because the p2 version was
               ahead, or because some renamed moved it there. In either case
               `hg status` will want it displayed as modified.

    # about the file state expected from p1 manifest:
    - mode: the file mode in p1
    - size: the file size in p1

    These values can be set to None, which means we don't have a meaningful
    value to compare with. Either because we don't really care about them as
    there `status` is known without having to look at the disk or because we
    don't know these right now and a full comparison will be needed to find out
    if the file is clean.

    # about the file state on disk last time we saw it:
    - mtime: the last known clean mtime for the file.

    This value can be set to None if no cacheable state exists. Either because
    we do not care (see previous section) or because we could not cache
    something yet.
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_info = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_info=False,
        has_meaningful_data=True,
        has_meaningful_mtime=True,
        parentfiledata=None,
    ):
        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_info = p2_info

        self._mode = None
        self._size = None
        self._mtime = None
        # without parent data, neither mode/size nor mtime can be meaningful
        if parentfiledata is None:
            has_meaningful_mtime = False
            has_meaningful_data = False
        if has_meaningful_data:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
        if has_meaningful_mtime:
            self._mtime = parentfiledata[2]

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        return cls(
            wc_tracked=True,
            p1_tracked=True,
            parentfiledata=(mode, size, mtime),
        )

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
        elif state == b'a':
            return cls(wc_tracked=True)
        elif state == b'r':
            if size == NONNORMAL:
                p1_tracked = True
                p2_info = True
            elif size == FROM_P2:
                p1_tracked = False
                p2_info = True
            else:
                p1_tracked = True
                p2_info = False
            return cls(p1_tracked=p1_tracked, p2_info=p2_info)
        elif state == b'n':
            if size == FROM_P2:
                return cls(wc_tracked=True, p2_info=True)
            elif size == NONNORMAL:
                return cls(wc_tracked=True, p1_tracked=True)
            elif mtime == AMBIGUOUS_TIME:
                # mtime is not usable; record mode/size but no mtime (the
                # placeholder 42 is dropped by has_meaningful_mtime=False)
                return cls(
                    wc_tracked=True,
                    p1_tracked=True,
                    has_meaningful_mtime=False,
                    parentfiledata=(mode, size, 42),
                )
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._mtime = None

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean" cancelling potential "possibly dirty call"

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be called on "normal" entries only. There are no
        reasons for this to not change in the future as long as the code is
        updated to preserve the proper state of the non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by command like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` is replacing various `normallookup` call. So we mark
        # the files as needing lookup
        #
        # Consider dropping this in the future in favor of something less broad.
        self._mtime = None

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" information from a DirstateItem

        This is to be called by the dirstatemap code when the second parent is
        dropped
        """
        if self._p2_info:
            self._p2_info = False
            self._mode = None
            self._size = None
            self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        if not self.any_tracked:
            return b'?'
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True if the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_info

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_info)

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        if not self._wc_tracked:
            return False
        elif not self._p1_tracked:
            return False
        elif self._p2_info:
            return False
        return True

    @property
    def p1_tracked(self):
        """True if the file is tracked in the first parent manifest"""
        return self._p1_tracked

    @property
    def p2_info(self):
        """True if the file needed to merge or apply any input from p2

        See the class documentation for details.
        """
        return self._wc_tracked and self._p2_info

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_info)

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self._p1_tracked and self._p2_info:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed and self._p1_tracked and self._p2_info:
            return NONNORMAL
        elif self._p2_info:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.added:
            return NONNORMAL
        elif self._size is None:
            return NONNORMAL
        else:
            return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._mtime is None:
            return AMBIGUOUS_TIME
        elif self._p2_info:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked:
            return AMBIGUOUS_TIME
        else:
            return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
348 351
def gettype(q):
    """Return the revlog entry type stored in the low 16 bits of *q*."""
    return int(q & 0xFFFF)
352 355
class BaseIndexObject(object):
    """Pure-Python implementation of a revlog index (base class)."""

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping, seeded with the null node
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap (if any) in sync with a strip
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry's offset field also carries the version/type
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
478 481
479 482
class IndexObject(BaseIndexObject):
    """Index backed by a contiguous, non-inline data buffer."""

    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
506 509
507 510
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the in-memory state is consumed by this call
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
550 553
551 554
class InlinedIndexObject(BaseIndexObject):
    """Index whose entries are interleaved with revision data (inline revlog)."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the inline data; count entries, and when `lgt` is given,
        fill `self._offsets` with the byte offset of each entry"""
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over the entry plus its inline revision data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
593 596
594 597
def parse_index2(data, inline, revlogv2=False):
    """Build the appropriate index object for `data`.

    Returns a `(index, cache)` pair; `cache` is only populated for inline
    revlogs."""
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
601 604
602 605
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object from `data`."""
    return IndexChangelogV2(data), None
605 608
606 609
class IndexObject2(IndexObject):
    """Index for the revlog-v2 entry format (includes sidedata fields)."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last field packs both compression modes in 2 bits each
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
665 668
666 669
class IndexChangelogV2(IndexObject2):
    """Index for the changelog-v2 entry format.

    The changelog stores no parent-revision fields on disk; they are always
    equal to the revision itself and are re-synthesized on unpack."""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # insert the implicit parent revisions (rev, rev)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
685 688
686 689
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
690 693
691 694
def parse_dirstate(dmap, copymap, st):
    """Parse a v1 dirstate blob `st` into `dmap` and `copymap`.

    Fills `dmap` with `filename -> DirstateItem` and `copymap` with
    `filename -> copy source`; returns the two parent nodes."""
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # the filename embeds the copy source after a NUL byte
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
711 714
712 715
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize `dmap`/`copymap` into a v1 dirstate blob.

    `pl` is the pair of parent nodes and `now` the current wall-clock time,
    used to invalidate mtimes that would be ambiguous with this write."""
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now