##// END OF EJS Templates
dirstate-v2: Use attributes as intended instead of properties in v2_data()...
Simon Sapin -
r49043:db589732 default
parent child Browse files
Show More
@@ -1,789 +1,789 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11 import struct
12 12 import zlib
13 13
14 14 from ..node import (
15 15 nullrev,
16 16 sha1nodeconstants,
17 17 )
18 18 from ..thirdparty import attr
19 19 from .. import (
20 20 error,
21 21 pycompat,
22 22 revlogutils,
23 23 util,
24 24 )
25 25
26 26 from ..revlogutils import nodemap as nodemaputil
27 27 from ..revlogutils import constants as revlog_constants
28 28
stringio = pycompat.bytesio


# local aliases for frequently used struct/zlib entry points
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1

# Bits of the `flags` byte inside a node in the file format
DIRSTATE_V2_WDIR_TRACKED = 1 << 0
DIRSTATE_V2_P1_TRACKED = 1 << 1
DIRSTATE_V2_P2_INFO = 1 << 2
DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 3
DIRSTATE_V2_HAS_MTIME = 1 << 4
DIRSTATE_V2_MODE_EXEC_PERM = 1 << 5
DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 6
56 56
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """One entry of the dirstate.

    Tracking information:
    - wc_tracked: whether the working copy tracks the file
    - p1_tracked: whether the working copy's first parent tracks the file
    - p2_info:    whether the file was involved in some merge operation,
                  either because it was actually merged, because the p2
                  version was ahead, or because a rename moved it there.
                  In every case `hg status` wants it displayed as modified.

    Expected state from the p1 manifest:
    - mode: the file mode in p1
    - size: the file size in p1

    Either of these may be None, meaning no value is worth comparing with:
    the status is already known without looking at the disk, or it is not
    known yet and a full content comparison will be needed anyway.

    Last observed on-disk state:
    - mtime: the last known clean mtime for the file, or None when no
      cachable state exists (because we do not care, or because nothing
      could be cached yet).
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_info = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_info=False,
        has_meaningful_data=True,
        has_meaningful_mtime=True,
        parentfiledata=None,
    ):
        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_info = p2_info

        self._mode = None
        self._size = None
        self._mtime = None
        if parentfiledata is None:
            # no cached stat data at all: neither mode/size nor mtime apply
            has_meaningful_mtime = False
            has_meaningful_data = False
        if has_meaningful_data:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
        if has_meaningful_mtime:
            self._mtime = parentfiledata[2]

    @classmethod
    def from_v2_data(cls, flags, size, mtime):
        """Build a new DirstateItem object from V2 data"""
        has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
        mode = None
        if has_mode_size:
            # the v2 format only records exec/symlink information,
            # reconstruct a plausible full mode from those two bits
            assert stat.S_IXUSR == 0o100
            if flags & DIRSTATE_V2_MODE_EXEC_PERM:
                mode = 0o755
            else:
                mode = 0o644
            if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
                mode |= stat.S_IFLNK
            else:
                mode |= stat.S_IFREG
        return cls(
            wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
            p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
            p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
            has_meaningful_data=has_mode_size,
            has_meaningful_mtime=bool(flags & DIRSTATE_V2_HAS_MTIME),
            parentfiledata=(mode, size, mtime),
        )

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        The dirstate-v1 format is frozen, so unlike __init__ this signature
        is not expected to change.
        """
        if state == b'm':
            return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
        if state == b'a':
            return cls(wc_tracked=True)
        if state == b'r':
            # special `size` values encode where the removed file came from
            if size == NONNORMAL:
                p1_tracked, p2_info = True, True
            elif size == FROM_P2:
                p1_tracked, p2_info = False, True
            else:
                p1_tracked, p2_info = True, False
            return cls(p1_tracked=p1_tracked, p2_info=p2_info)
        if state == b'n':
            if size == FROM_P2:
                return cls(wc_tracked=True, p2_info=True)
            if size == NONNORMAL:
                return cls(wc_tracked=True, p1_tracked=True)
            if mtime == AMBIGUOUS_TIME:
                # the recorded mtime is meaningless, pass a placeholder
                return cls(
                    wc_tracked=True,
                    p1_tracked=True,
                    has_meaningful_mtime=False,
                    parentfiledata=(mode, size, 42),
                )
            return cls(
                wc_tracked=True,
                p1_tracked=True,
                parentfiledata=(mode, size, mtime),
            )
        raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        The next status call will then have to actually check the file's
        content to decide whether it is clean.
        """
        self._mtime = None

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean" cancelling potential "possibly dirty call"

        Note: this function descends from `dirstate.normal` and is currently
        expected to be called on "normal" entries only. There is no reason
        for this not to change in the future, as long as the code is updated
        to preserve the proper state of non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by command like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` replaces various `normallookup` calls, so mark the
        # file as needing a lookup.
        #
        # Consider dropping this in the future in favor of something less broad.
        self._mtime = None

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" information from a DirstateItem

        To be called by the dirstatemap code when the second parent is dropped.
        """
        if self._p2_info:
            self._p2_info = False
            self._mode = None
            self._size = None
            self._mtime = None

    @property
    def mode(self):
        # kept for compatibility, same value as the v1 serialization
        return self.v1_mode()

    @property
    def size(self):
        # kept for compatibility, same value as the v1 serialization
        return self.v1_size()

    @property
    def mtime(self):
        # kept for compatibility, same value as the v1 serialization
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        if not self.any_tracked:
            return b'?'
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True if the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_info

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not self._p1_tracked and not self._p2_info

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        return bool(
            self._wc_tracked and self._p1_tracked and not self._p2_info
        )

    @property
    def p1_tracked(self):
        """True if the file is tracked in the first parent manifest"""
        return self._p1_tracked

    @property
    def p2_info(self):
        """True if the file needed to merge or apply any input from p2

        See the class documentation for details.
        """
        return self._wc_tracked and self._p2_info

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_info)

    def v2_data(self):
        """Returns (flags, mode, size, mtime) for v2 serialization"""
        flags = 0
        if self._wc_tracked:
            flags |= DIRSTATE_V2_WDIR_TRACKED
        if self._p1_tracked:
            flags |= DIRSTATE_V2_P1_TRACKED
        if self._p2_info:
            flags |= DIRSTATE_V2_P2_INFO
        if self._mode is not None and self._size is not None:
            flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
            if self.mode & stat.S_IXUSR:
                flags |= DIRSTATE_V2_MODE_EXEC_PERM
            if stat.S_ISLNK(self.mode):
                flags |= DIRSTATE_V2_MODE_IS_SYMLINK
        if self._mtime is not None:
            flags |= DIRSTATE_V2_HAS_MTIME
        return (flags, self._size or 0, self._mtime or 0)

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed:
            return b'r'
        if self._p1_tracked and self._p2_info:
            return b'm'
        if self.added:
            return b'a'
        return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed and self._p1_tracked and self._p2_info:
            return NONNORMAL
        if self._p2_info:
            return FROM_P2
        if self.removed:
            return 0
        if self.added:
            return NONNORMAL
        if self._size is None:
            return NONNORMAL
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed:
            return 0
        if self._mtime is None:
            return AMBIGUOUS_TIME
        if self._p2_info:
            return AMBIGUOUS_TIME
        if not self._p1_tracked:
            return AMBIGUOUS_TIME
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
393 393
394 394
def gettype(q):
    """Return the revlog entry type stored in the low 16 bits of *q*."""
    low16 = q & 0xFFFF
    return int(low16)
397 397
398 398
class BaseIndexObject(object):
    # Whether this index may be handed to an algorithm implemented in Rust
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size in bytes of one serialized index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping (node is field 7 of each entry)
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for rev in range(0, len(self)):
            nodemap[self[rev][7]] = rev
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # only bother when the nodemap cache has actually been built
        if '_nodemap' in vars(self):
            for rev in range(start, len(self)):
                del self._nodemap[self[rev][7]]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the nodemap cache in sync if it exists
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        packed = self._pack_entry(len(self), tup)
        self._extra.append(packed)

    def _pack_entry(self, rev, entry):
        # v1 entries carry no sidedata information
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            start = self._calculate_index(i)
            data = self._data[start : start + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry's offset slot doubles as the revlog type
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad with sidedata placeholders and inline compression modes
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the header of the file overlays the first entry
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
524 524
525 525
class IndexObject(BaseIndexObject):
    """Index backed by a flat, non-inline data buffer."""

    def __init__(self, data):
        # the buffer must hold a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        # only deleting a tail slice `del index[start:-1]` is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        start = i.start
        self._check_index(start)
        self._stripnodes(start)
        if start < self._lgt:
            # truncate the on-disk portion and drop all in-memory extras
            self._data = self._data[: start * self.entry_size]
            self._lgt = start
            self._extra = []
        else:
            self._extra = self._extra[: start - self._lgt]
552 552
553 553
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )
        # the pending incremental state has been consumed, reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is None:
            return
        self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
        if self._nm_root:
            self._nm_docket = docket
        else:
            # empty root: nothing usable was persisted
            self._nm_root = self._nm_max_idx = self._nm_docket = None
596 596
597 597
class InlinedIndexObject(BaseIndexObject):
    """Index whose entries are interleaved with revision data (inline)."""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second scan records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # walk the inline data; when `lgt` is known, also fill self._offsets
        pos = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while pos <= len(self._data) - self.entry_size:
            # the compressed chunk length lives after the 8-byte offset field
            start = pos + self.big_int_size
            (size,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = pos
            count += 1
            pos += self.entry_size + size
        if pos != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only deleting a tail slice `del index[start:-1]` is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        start = i.start
        self._check_index(start)
        self._stripnodes(start)
        if start < self._lgt:
            self._offsets = self._offsets[:start]
            self._lgt = start
            self._extra = []
        else:
            self._extra = self._extra[: start - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
639 639
640 640
def parse_index2(data, inline, revlogv2=False):
    """build an index object from raw index data

    Returns an (index, cache) pair; the cache is only populated for
    inline indexes, where data and index share the same buffer.
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    return InlinedIndexObject(data, inline), (0, data)
647 647
648 648
def parse_index_cl_v2(data):
    """build a changelog-v2 index object from raw data; no cache is returned"""
    return IndexChangelogV2(data), None
651 651
652 652
class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written to disk are immutable here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        entry = list(self[rev])
        entry[0] = offset_flags
        entry[8] = sidedata_offset
        entry[9] = sidedata_length
        entry[11] = compression_mode
        packed = self._pack_entry(rev, tuple(entry))
        self._extra[rev - self._lgt] = packed

    def _unpack_entry(self, rev, data):
        fields = self.index_format.unpack(data)
        entry = fields[:10]
        # one byte holds both compression modes: data in bits 0-1,
        # sidedata in bits 2-3
        data_comp = fields[10] & 3
        sidedata_comp = (fields[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # fold the two compression modes back into a single byte
        comp = (entry[10] & 3) | ((entry[11] & 3) << 2)
        data = entry[:10] + (comp,)
        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        return self._pack_entry(rev, self[rev])

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
711 711
712 712
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # changelog-v2 stores no delta-base/link-rev fields: both are `rev`
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # the self-referencing fields must match the revision being packed
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        fields = entry[:3] + entry[5:10]
        comp = (entry[10] & 3) | ((entry[11] & 3) << 2)
        return self.index_format.pack(*(fields + (comp,)))
731 731
732 732
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
736 736
737 737
def parse_dirstate(dmap, copymap, st):
    """parse a dirstate-v1 blob, filling `dmap` and `copymap` in place

    Returns the two parent nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    entry_fmt = b">cllll"
    entry_size = struct.calcsize(entry_fmt)
    pos = 40
    total = len(st)

    # the inner loop
    while pos < total:
        end = pos + entry_size
        fields = _unpack(b">cllll", st[pos:end])  # a literal here is faster
        # the last field is the filename length; it follows the entry
        pos = end + fields[4]
        f = st[end:pos]
        if b'\0' in f:
            # a NUL separates the filename from its copy source
            f, source = f.split(b'\0')
            copymap[f] = source
        dmap[f] = DirstateItem.from_v1_data(*fields[:4])
    return parents
757 757
758 758
def pack_dirstate(dmap, copymap, pl, now):
    """serialize a dirstate map into the v1 on-disk format

    `pl` holds the two parent nodes, `now` the current wall-clock time used
    to invalidate mtimes that would otherwise be ambiguous.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # the copy source rides along after a NUL separator
            f = b"%s\0%s" % (f, copymap[f])
        packed = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(packed)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now