##// END OF EJS Templates
dirstate-item: have all the logic go through the v1_ accessors...
marmoute -
r48738:05f2be3a default
parent child Browse files
Show More
@@ -1,735 +1,735 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from ..thirdparty import attr
18 18 from .. import (
19 19 error,
20 20 pycompat,
21 21 revlogutils,
22 22 util,
23 23 )
24 24
25 25 from ..revlogutils import nodemap as nodemaputil
26 26 from ..revlogutils import constants as revlog_constants
27 27
stringio = pycompat.bytesio


# Short module-level aliases for frequently used callables; hot code in this
# module binds them to locals for faster lookup inside loops.
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
45 45
46 46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # internal v1 fields; external code should go through the accessors below
    _state = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # default to "no information recorded"
        self._state = None
        self._mode = 0
        self._size = NONNORMAL
        self._mtime = AMBIGUOUS_TIME
        if not (p1_tracked or p2_tracked or wc_tracked):
            pass  # the object has no state to record
        elif merged:
            self._state = b'm'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            self._state = b'a'
            self._size = NONNORMAL
            self._mtime = AMBIGUOUS_TIME
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            self._state = b'r'
            self._size = 0
            self._mtime = 0
        elif clean_p2 and wc_tracked:
            self._state = b'n'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif not p1_tracked and p2_tracked and wc_tracked:
            self._state = b'n'
            self._size = FROM_P2
            self._mtime = AMBIGUOUS_TIME
        elif possibly_dirty:
            self._state = b'n'
            self._size = NONNORMAL
            self._mtime = AMBIGUOUS_TIME
        elif wc_tracked:
            # this is a "normal" file
            if parentfiledata is None:
                msg = b'failed to pass parentfiledata for a normal file'
                raise error.ProgrammingError(msg)
            self._state = b'n'
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]
        else:
            assert False, 'unreachable'

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'a'
        instance._mode = 0
        instance._size = NONNORMAL
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'm'
        instance._mode = 0
        instance._size = FROM_P2
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'n'
        instance._mode = 0
        instance._size = FROM_P2
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._state = b'n'
        instance._mode = 0
        instance._size = NONNORMAL
        instance._mtime = AMBIGUOUS_TIME
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # a "normal" file must carry real stat data, not sentinel sizes
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._state = b'n'
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        instance = cls()
        instance._state = state
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._mtime = AMBIGUOUS_TIME

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # backup the previous state (useful for merge)
        size = 0
        if self.merged:  # merge
            size = NONNORMAL
        elif self.from_p2:
            size = FROM_P2
        self._state = b'r'
        self._mode = 0
        self._size = size
        self._mtime = 0

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self.v1_state() in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self.v1_state() == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self.v1_state() == b'm'

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self.v1_state() == b'n' and self.v1_size() == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self.v1_state() == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.v1_state() == b'r' and self.v1_size() == NONNORMAL

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
339 339
340 340
def gettype(q):
    """Return the revlog entry type stored in the low 16 bits of *q*."""
    return int(0xFFFF & q)
343 343
344 344
class BaseIndexObject(object):
    """Pure-Python revlog index; shared machinery for the concrete variants."""

    # Can this object be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # byte size of one packed entry, derived from the struct format
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping; nullid always maps to nullrev
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap, if it was built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # discard the cached nodemap; it will be rebuilt on next access
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # on-disk entries plus entries appended in memory
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the cached nodemap in sync with the new entry
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries cannot carry sidedata information (fields 8 and 9)
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        # -1 is the conventional index of the null revision
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry was appended in memory
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first on-disk entry's offset field doubles as the version
            # header, so rebuild a clean offset/type pair for revision 0
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # extend the v1 tuple with sidedata placeholders and compression modes
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # revision 0's entry starts after the version header on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
470 470
471 471
class IndexObject(BaseIndexObject):
    """Index backed by a non-inline revlog: fixed-size entries, direct seek."""

    def __init__(self, data):
        # data must be a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # entries are fixed-size, so the byte offset is a simple product
        return i * self.entry_size

    def __delitem__(self, i):
        # only the `del index[i:-1]` strip pattern is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating into the on-disk portion drops all in-memory entries
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # strip point lies within the in-memory appended entries
            self._extra = self._extra[: i - self._lgt]
498 498
499 499
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # consume the pending state: a new baseline must be provided before
        # the next incremental update
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
542 542
543 543
class InlinedIndexObject(BaseIndexObject):
    """Index backed by an inline revlog: each entry is followed by its chunk
    data, so entry offsets must be discovered by scanning."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the inline data once.

        With lgt=None, only count entries; with an entry count, also fill
        self._offsets with the byte offset of each entry.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # the compressed-length field sits after the 8-byte offset/flags
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over the entry and its inlined chunk data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only the `del index[i:-1]` strip pattern is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating into the on-disk portion drops all in-memory entries
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were precomputed by _inline_scan
        return self._offsets[i]
585 585
586 586
def parse_index2(data, inline, revlogv2=False):
    """Build an index object for *data*.

    Returns a ``(index, cache)`` pair; the cache is only populated for
    inline revlogs, where the chunk data lives inside the index itself.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    index_cls = IndexObject2 if revlogv2 else IndexObject
    return index_cls(data), None
593 593
594 594
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; no inline cache applies."""
    index = IndexChangelogV2(data)
    return index, None
597 597
598 598
class IndexObject2(IndexObject):
    """Index for the revlog-v2 format, whose entries carry sidedata fields."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # only in-memory (not yet written) entries may be rewritten
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last packed byte holds both compression modes:
        # bits 0-1 data, bits 2-3 sidedata
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold both 2-bit compression modes into a single byte
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 keeps its version header in the docket file, not the index
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
657 657
658 658
class IndexChangelogV2(IndexObject2):
    """Index for the changelog-v2 format.

    On disk the entry omits the parent-revision fields; for the changelog
    both parents are identified by the entry's own revision number.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # re-insert the implicit (rev, rev) linkrev/parent slots
        entry = items[:3] + (rev, rev) + items[3:8]
        # last packed byte holds both 2-bit compression modes
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # the implicit slots must round-trip the revision number
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
677 677
678 678
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    index = PersistentNodeMapIndexObject(data)
    return index, None
682 682
683 683
def parse_dirstate(dmap, copymap, st):
    """Parse a v1 dirstate blob *st*.

    Fills *dmap* (filename -> DirstateItem) and *copymap* (filename ->
    copy source) in place, and returns the two parent nodes stored in the
    first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # hoist loop invariants so lookups inside the loop stay local
    entry_fmt = b">cllll"
    entry_size = struct.calcsize(entry_fmt)
    offset = 40
    end = len(st)

    # the inner loop
    while offset < end:
        entry_end = offset + entry_size
        # a literal format here is faster than reading the variable
        entry = _unpack(b">cllll", st[offset:entry_end])
        offset = entry_end + entry[4]
        filename = st[entry_end:offset]
        if b'\0' in filename:
            # a NUL separates the filename from its copy source
            filename, source = filename.split(b'\0')
            copymap[filename] = source
        dmap[filename] = DirstateItem.from_v1_data(*entry[:4])
    return parents
703 703
704 704
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize *dmap* and *copymap* into a v1 dirstate blob.

    *pl* is the pair of parent nodes written first; *now* is the current
    wall-clock time, used to invalidate mtimes that would be ambiguous.
    """
    now = int(now)
    buf = stringio()
    buf.write(b"".join(pl))
    for filename, item in pycompat.iteritems(dmap):
        if item.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            item.set_possibly_dirty()

        if filename in copymap:
            # the copy source rides along after a NUL separator
            filename = b"%s\0%s" % (filename, copymap[filename])
        packed = _pack(
            b">cllll",
            item.v1_state(),
            item.v1_mode(),
            item.v1_size(),
            item.v1_mtime(),
            len(filename),
        )
        buf.write(packed)
        buf.write(filename)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now