dirstatemap: remove `_insert_entry`...
Raphaël Gomès
r49994:77dfde41 default
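
In short: `_insert_entry` was a one-line wrapper in both dirstatemap implementations (a plain dict store in the pure-Python class, `self._map.addfile(f, entry)` in the Rust-backed one), so this commit deletes it and has the call sites (`reset_state` and `set_tracked`) store the entry directly. A condensed sketch of the shape of the refactor, using illustrative class names rather than the real ones:

# Before: callers go through a trivial indirection.
class MapBefore:
    def __init__(self):
        self._map = {}

    def _insert_entry(self, f, entry):
        # one-line wrapper around the underlying map
        self._map[f] = entry

    def reset_state(self, f, entry):
        self._insert_entry(f, entry)


# After: the wrapper is gone and the call site does the store itself.
class MapAfter:
    def __init__(self):
        self._map = {}

    def reset_state(self, f, entry):
        self._map[f] = entry
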
@@ -1,750 +1,738
1 1 # dirstatemap.py
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6
7 7 import errno
8 8
9 9 from .i18n import _
10 10
11 11 from . import (
12 12 error,
13 13 pathutil,
14 14 policy,
15 15 txnutil,
16 16 util,
17 17 )
18 18
19 19 from .dirstateutils import (
20 20 docket as docketmod,
21 21 v2,
22 22 )
23 23
24 24 parsers = policy.importmod('parsers')
25 25 rustmod = policy.importrust('dirstate')
26 26
27 27 propertycache = util.propertycache
28 28
29 29 if rustmod is None:
30 30 DirstateItem = parsers.DirstateItem
31 31 else:
32 32 DirstateItem = rustmod.DirstateItem
33 33
34 34 rangemask = 0x7FFFFFFF
35 35
36 36
37 37 class _dirstatemapcommon:
38 38 """
39 39 Methods that are identical for both implementations of the dirstatemap
40 40 class, with and without Rust extensions enabled.
41 41 """
42 42
43 43 # please pytype
44 44
45 45 _map = None
46 46 copymap = None
47 47
48 48 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
49 49 self._use_dirstate_v2 = use_dirstate_v2
50 50 self._nodeconstants = nodeconstants
51 51 self._ui = ui
52 52 self._opener = opener
53 53 self._root = root
54 54 self._filename = b'dirstate'
55 55 self._nodelen = 20 # Also update Rust code when changing this!
56 56 self._parents = None
57 57 self._dirtyparents = False
58 58 self._docket = None
59 59
60 60 # for consistent view between _pl() and _read() invocations
61 61 self._pendingmode = None
62 62
63 63 def preload(self):
64 64 """Loads the underlying data, if it's not already loaded"""
65 65 self._map
66 66
67 67 def get(self, key, default=None):
68 68 return self._map.get(key, default)
69 69
70 70 def __len__(self):
71 71 return len(self._map)
72 72
73 73 def __iter__(self):
74 74 return iter(self._map)
75 75
76 76 def __contains__(self, key):
77 77 return key in self._map
78 78
79 79 def __getitem__(self, item):
80 80 return self._map[item]
81 81
82 82 ### sub-class utility method
83 83 #
84 84 # Used to allow for a generic implementation of some methods while still coping
85 85 # with minor differences between implementations.
86 86
87 87 def _dirs_incr(self, filename, old_entry=None):
88 88 """increment the dirstate counter if applicable
89 89
90 90 This might be a no-op for some subclasses that deal with directory
91 91 tracking in a different way.
92 92 """
93 93
94 94 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
95 95 """decrement the dirstate counter if applicable
96 96
97 97 This might be a no-op for some subclasses that deal with directory
98 98 tracking in a different way.
99 99 """
100 100
101 101 def _refresh_entry(self, f, entry):
102 102 """record updated state of an entry"""
103 103
104 def _insert_entry(self, f, entry):
105 """add a new dirstate entry (or replace an unrelated one)
106
107 The fact that it is actually new is the responsibility of the caller
108 """
109
110 104 def _drop_entry(self, f):
111 105 """remove any entry for file f
112 106
113 107 This should also drop associated copy information
114 108
115 109 The fact that we actually need to drop it is the responsibility of the caller"""
116 110
117 111 ### method to manipulate the entries
118 112
119 113 def set_possibly_dirty(self, filename):
120 114 """record that the current state of the file on disk is unknown"""
121 115 entry = self[filename]
122 116 entry.set_possibly_dirty()
123 117 self._refresh_entry(filename, entry)
124 118
125 119 def set_clean(self, filename, mode, size, mtime):
126 120 """mark a file as back to a clean state"""
127 121 entry = self[filename]
128 122 size = size & rangemask
129 123 entry.set_clean(mode, size, mtime)
130 124 self._refresh_entry(filename, entry)
131 125 self.copymap.pop(filename, None)
132 126
133 127 def set_untracked(self, f):
134 128 """Mark a file as no longer tracked in the dirstate map"""
135 129 entry = self.get(f)
136 130 if entry is None:
137 131 return False
138 132 else:
139 133 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
140 134 if not entry.p2_info:
141 135 self.copymap.pop(f, None)
142 136 entry.set_untracked()
143 137 self._refresh_entry(f, entry)
144 138 return True
145 139
146 140 ### disk interaction
147 141
148 142 def _opendirstatefile(self):
149 143 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
150 144 if self._pendingmode is not None and self._pendingmode != mode:
151 145 fp.close()
152 146 raise error.Abort(
153 147 _(b'working directory state may be changed parallelly')
154 148 )
155 149 self._pendingmode = mode
156 150 return fp
157 151
158 152 def _readdirstatefile(self, size=-1):
159 153 try:
160 154 with self._opendirstatefile() as fp:
161 155 return fp.read(size)
162 156 except IOError as err:
163 157 if err.errno != errno.ENOENT:
164 158 raise
165 159 # File doesn't exist, so the current state is empty
166 160 return b''
167 161
168 162 @property
169 163 def docket(self):
170 164 if not self._docket:
171 165 if not self._use_dirstate_v2:
172 166 raise error.ProgrammingError(
173 167 b'dirstate only has a docket in v2 format'
174 168 )
175 169 self._docket = docketmod.DirstateDocket.parse(
176 170 self._readdirstatefile(), self._nodeconstants
177 171 )
178 172 return self._docket
179 173
180 174 def write_v2_no_append(self, tr, st, meta, packed):
181 175 old_docket = self.docket
182 176 new_docket = docketmod.DirstateDocket.with_new_uuid(
183 177 self.parents(), len(packed), meta
184 178 )
185 179 data_filename = new_docket.data_filename()
186 180 if tr:
187 181 tr.add(data_filename, 0)
188 182 self._opener.write(data_filename, packed)
189 183 # Write the new docket after the new data file has been
190 184 # written. Because `st` was opened with `atomictemp=True`,
191 185 # the actual `.hg/dirstate` file is only affected on close.
192 186 st.write(new_docket.serialize())
193 187 st.close()
194 188 # Remove the old data file after the new docket pointing to
195 189 # the new data file was written.
196 190 if old_docket.uuid:
197 191 data_filename = old_docket.data_filename()
198 192 unlink = lambda _tr=None: self._opener.unlink(data_filename)
199 193 if tr:
200 194 category = b"dirstate-v2-clean-" + old_docket.uuid
201 195 tr.addpostclose(category, unlink)
202 196 else:
203 197 unlink()
204 198 self._docket = new_docket
205 199
206 200 ### reading/setting parents
207 201
208 202 def parents(self):
209 203 if not self._parents:
210 204 if self._use_dirstate_v2:
211 205 self._parents = self.docket.parents
212 206 else:
213 207 read_len = self._nodelen * 2
214 208 st = self._readdirstatefile(read_len)
215 209 l = len(st)
216 210 if l == read_len:
217 211 self._parents = (
218 212 st[: self._nodelen],
219 213 st[self._nodelen : 2 * self._nodelen],
220 214 )
221 215 elif l == 0:
222 216 self._parents = (
223 217 self._nodeconstants.nullid,
224 218 self._nodeconstants.nullid,
225 219 )
226 220 else:
227 221 raise error.Abort(
228 222 _(b'working directory state appears damaged!')
229 223 )
230 224
231 225 return self._parents
232 226
233 227
234 228 class dirstatemap(_dirstatemapcommon):
235 229 """Map encapsulating the dirstate's contents.
236 230
237 231 The dirstate contains the following state:
238 232
239 233 - `identity` is the identity of the dirstate file, which can be used to
240 234 detect when changes have occurred to the dirstate file.
241 235
242 236 - `parents` is a pair containing the parents of the working copy. The
243 237 parents are updated by calling `setparents`.
244 238
245 239 - the state map maps filenames to tuples of (state, mode, size, mtime),
246 240 where state is a single character representing 'normal', 'added',
247 241 'removed', or 'merged'. It is read by treating the dirstate as a
248 242 dict. File state is updated by calling various methods (see each
249 243 documentation for details):
250 244
251 245 - `reset_state`,
252 246 - `set_tracked`
253 247 - `set_untracked`
254 248 - `set_clean`
255 249 - `set_possibly_dirty`
256 250
257 251 - `copymap` maps destination filenames to their source filename.
258 252
259 253 The dirstate also provides the following views onto the state:
260 254
261 255 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
262 256 form that they appear as in the dirstate.
263 257
264 258 - `dirfoldmap` is a dict mapping normalized directory names to the
265 259 denormalized form that they appear as in the dirstate.
266 260 """
267 261
268 262 ### Core data storage and access
269 263
270 264 @propertycache
271 265 def _map(self):
272 266 self._map = {}
273 267 self.read()
274 268 return self._map
275 269
276 270 @propertycache
277 271 def copymap(self):
278 272 self.copymap = {}
279 273 self._map
280 274 return self.copymap
281 275
282 276 def clear(self):
283 277 self._map.clear()
284 278 self.copymap.clear()
285 279 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
286 280 util.clearcachedproperty(self, b"_dirs")
287 281 util.clearcachedproperty(self, b"_alldirs")
288 282 util.clearcachedproperty(self, b"filefoldmap")
289 283 util.clearcachedproperty(self, b"dirfoldmap")
290 284
291 285 def items(self):
292 286 return self._map.items()
293 287
294 288 # forward for python2,3 compat
295 289 iteritems = items
296 290
297 291 def debug_iter(self, all):
298 292 """
299 293 Return an iterator of (filename, state, mode, size, mtime) tuples
300 294
301 295 `all` is unused when Rust is not enabled
302 296 """
303 297 for (filename, item) in self.items():
304 298 yield (filename, item.state, item.mode, item.size, item.mtime)
305 299
306 300 def keys(self):
307 301 return self._map.keys()
308 302
309 303 ### reading/setting parents
310 304
311 305 def setparents(self, p1, p2, fold_p2=False):
312 306 self._parents = (p1, p2)
313 307 self._dirtyparents = True
314 308 copies = {}
315 309 if fold_p2:
316 310 for f, s in self._map.items():
317 311 # Discard "merged" markers when moving away from a merge state
318 312 if s.p2_info:
319 313 source = self.copymap.pop(f, None)
320 314 if source:
321 315 copies[f] = source
322 316 s.drop_merge_data()
323 317 return copies
324 318
325 319 ### disk interaction
326 320
327 321 def read(self):
328 322 # ignore HG_PENDING because identity is used only for writing
329 323 self.identity = util.filestat.frompath(
330 324 self._opener.join(self._filename)
331 325 )
332 326
333 327 if self._use_dirstate_v2:
334 328 if not self.docket.uuid:
335 329 return
336 330 st = self._opener.read(self.docket.data_filename())
337 331 else:
338 332 st = self._readdirstatefile()
339 333
340 334 if not st:
341 335 return
342 336
343 337 # TODO: adjust this estimate for dirstate-v2
344 338 if util.safehasattr(parsers, b'dict_new_presized'):
345 339 # Make an estimate of the number of files in the dirstate based on
346 340 # its size. This trades wasting some memory for avoiding costly
347 341 # resizes. Each entry has a prefix of 17 bytes followed by one or
348 342 # two path names. Studies on various large-scale real-world repositories
349 343 # found 54 bytes to be a reasonable upper limit for the average path name.
350 344 # Copy entries are ignored for the sake of this estimate.
351 345 self._map = parsers.dict_new_presized(len(st) // 71)
352 346
353 347 # Python's garbage collector triggers a GC each time a certain number
354 348 # of container objects (the number being defined by
355 349 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
356 350 # for each file in the dirstate. The C version then immediately marks
357 351 # them as not to be tracked by the collector. However, this has no
358 352 # effect on when GCs are triggered, only on what objects the GC looks
359 353 # into. This means that O(number of files) GCs are unavoidable.
360 354 # Depending on when in the process's lifetime the dirstate is parsed,
361 355 # this can get very expensive. As a workaround, disable GC while
362 356 # parsing the dirstate.
363 357 #
364 358 # (we cannot decorate the function directly since it is in a C module)
365 359 if self._use_dirstate_v2:
366 360 p = self.docket.parents
367 361 meta = self.docket.tree_metadata
368 362 parse_dirstate = util.nogc(v2.parse_dirstate)
369 363 parse_dirstate(self._map, self.copymap, st, meta)
370 364 else:
371 365 parse_dirstate = util.nogc(parsers.parse_dirstate)
372 366 p = parse_dirstate(self._map, self.copymap, st)
373 367 if not self._dirtyparents:
374 368 self.setparents(*p)
375 369
376 370 # Avoid excess attribute lookups by fast pathing certain checks
377 371 self.__contains__ = self._map.__contains__
378 372 self.__getitem__ = self._map.__getitem__
379 373 self.get = self._map.get
380 374
381 375 def write(self, tr, st):
382 376 if self._use_dirstate_v2:
383 377 packed, meta = v2.pack_dirstate(self._map, self.copymap)
384 378 self.write_v2_no_append(tr, st, meta, packed)
385 379 else:
386 380 packed = parsers.pack_dirstate(
387 381 self._map, self.copymap, self.parents()
388 382 )
389 383 st.write(packed)
390 384 st.close()
391 385 self._dirtyparents = False
392 386
393 387 @propertycache
394 388 def identity(self):
395 389 self._map
396 390 return self.identity
397 391
398 392 ### code related to maintaining and accessing "extra" property
399 393 # (e.g. "has_dir")
400 394
401 395 def _dirs_incr(self, filename, old_entry=None):
402 396 """incremente the dirstate counter if applicable"""
403 397 if (
404 398 old_entry is None or old_entry.removed
405 399 ) and "_dirs" in self.__dict__:
406 400 self._dirs.addpath(filename)
407 401 if old_entry is None and "_alldirs" in self.__dict__:
408 402 self._alldirs.addpath(filename)
409 403
410 404 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
411 405 """decremente the dirstate counter if applicable"""
412 406 if old_entry is not None:
413 407 if "_dirs" in self.__dict__ and not old_entry.removed:
414 408 self._dirs.delpath(filename)
415 409 if "_alldirs" in self.__dict__ and not remove_variant:
416 410 self._alldirs.delpath(filename)
417 411 elif remove_variant and "_alldirs" in self.__dict__:
418 412 self._alldirs.addpath(filename)
419 413 if "filefoldmap" in self.__dict__:
420 414 normed = util.normcase(filename)
421 415 self.filefoldmap.pop(normed, None)
422 416
423 417 @propertycache
424 418 def filefoldmap(self):
425 419 """Returns a dictionary mapping normalized case paths to their
426 420 non-normalized versions.
427 421 """
428 422 try:
429 423 makefilefoldmap = parsers.make_file_foldmap
430 424 except AttributeError:
431 425 pass
432 426 else:
433 427 return makefilefoldmap(
434 428 self._map, util.normcasespec, util.normcasefallback
435 429 )
436 430
437 431 f = {}
438 432 normcase = util.normcase
439 433 for name, s in self._map.items():
440 434 if not s.removed:
441 435 f[normcase(name)] = name
442 436 f[b'.'] = b'.' # prevents useless util.fspath() invocation
443 437 return f
444 438
445 439 @propertycache
446 440 def dirfoldmap(self):
447 441 f = {}
448 442 normcase = util.normcase
449 443 for name in self._dirs:
450 444 f[normcase(name)] = name
451 445 return f
452 446
453 447 def hastrackeddir(self, d):
454 448 """
455 449 Returns True if the dirstate contains a tracked (not removed) file
456 450 in this directory.
457 451 """
458 452 return d in self._dirs
459 453
460 454 def hasdir(self, d):
461 455 """
462 456 Returns True if the dirstate contains a file (tracked or removed)
463 457 in this directory.
464 458 """
465 459 return d in self._alldirs
466 460
467 461 @propertycache
468 462 def _dirs(self):
469 463 return pathutil.dirs(self._map, only_tracked=True)
470 464
471 465 @propertycache
472 466 def _alldirs(self):
473 467 return pathutil.dirs(self._map)
474 468
475 469 ### code related to manipulation of entries and copy-sources
476 470
477 471 def reset_state(
478 472 self,
479 473 filename,
480 474 wc_tracked=False,
481 475 p1_tracked=False,
482 476 p2_info=False,
483 477 has_meaningful_mtime=True,
484 478 parentfiledata=None,
485 479 ):
486 480 """Set a entry to a given state, diregarding all previous state
487 481
488 482 This is to be used by the part of the dirstate API dedicated to
489 483 adjusting the dirstate after an update/merge.
490 484
491 485 note: calling this might result in no entry existing at all if the
492 486 dirstate map does not see any point in having one for this file
493 487 anymore.
494 488 """
495 489 # copy information is now outdated
496 490 # (maybe new information should be passed directly to this function)
497 491 self.copymap.pop(filename, None)
498 492
499 493 if not (p1_tracked or p2_info or wc_tracked):
500 494 old_entry = self._map.get(filename)
501 495 self._drop_entry(filename)
502 496 self._dirs_decr(filename, old_entry=old_entry)
503 497 return
504 498
505 499 old_entry = self._map.get(filename)
506 500 self._dirs_incr(filename, old_entry)
507 501 entry = DirstateItem(
508 502 wc_tracked=wc_tracked,
509 503 p1_tracked=p1_tracked,
510 504 p2_info=p2_info,
511 505 has_meaningful_mtime=has_meaningful_mtime,
512 506 parentfiledata=parentfiledata,
513 507 )
514 self._insert_entry(filename, entry)
508 self._map[filename] = entry
515 509
516 510 def set_tracked(self, filename):
517 511 new = False
518 512 entry = self.get(filename)
519 513 if entry is None:
520 514 self._dirs_incr(filename)
521 515 entry = DirstateItem(
522 516 wc_tracked=True,
523 517 )
524 518
525 self._insert_entry(filename, entry)
519 self._map[filename] = entry
526 520 new = True
527 521 elif not entry.tracked:
528 522 self._dirs_incr(filename, entry)
529 523 entry.set_tracked()
530 524 self._refresh_entry(filename, entry)
531 525 new = True
532 526 else:
533 527 # XXX This is probably overkill for most cases, but we need this to
534 528 # fully replace the `normallookup` call with the `set_tracked` one.
535 529 # Consider smoothing this in the future.
536 530 entry.set_possibly_dirty()
537 531 self._refresh_entry(filename, entry)
538 532 return new
539 533
540 534 def _refresh_entry(self, f, entry):
541 535 if not entry.any_tracked:
542 536 self._map.pop(f, None)
543 537
544 def _insert_entry(self, f, entry):
545 self._map[f] = entry
546
547 538 def _drop_entry(self, f):
548 539 self._map.pop(f, None)
549 540 self.copymap.pop(f, None)
550 541
551 542
552 543 if rustmod is not None:
553 544
554 545 class dirstatemap(_dirstatemapcommon):
555 546
556 547 ### Core data storage and access
557 548
558 549 @propertycache
559 550 def _map(self):
560 551 """
561 552 Fills the Dirstatemap when called.
562 553 """
563 554 # ignore HG_PENDING because identity is used only for writing
564 555 self.identity = util.filestat.frompath(
565 556 self._opener.join(self._filename)
566 557 )
567 558
568 559 if self._use_dirstate_v2:
569 560 if self.docket.uuid:
570 561 # TODO: use mmap when possible
571 562 data = self._opener.read(self.docket.data_filename())
572 563 else:
573 564 data = b''
574 565 self._map = rustmod.DirstateMap.new_v2(
575 566 data, self.docket.data_size, self.docket.tree_metadata
576 567 )
577 568 parents = self.docket.parents
578 569 else:
579 570 self._map, parents = rustmod.DirstateMap.new_v1(
580 571 self._readdirstatefile()
581 572 )
582 573
583 574 if parents and not self._dirtyparents:
584 575 self.setparents(*parents)
585 576
586 577 self.__contains__ = self._map.__contains__
587 578 self.__getitem__ = self._map.__getitem__
588 579 self.get = self._map.get
589 580 return self._map
590 581
591 582 @property
592 583 def copymap(self):
593 584 return self._map.copymap()
594 585
595 586 def debug_iter(self, all):
596 587 """
597 588 Return an iterator of (filename, state, mode, size, mtime) tuples
598 589
599 590 `all`: also include, with `state == b' '`, dirstate tree nodes that
600 591 don't have an associated `DirstateItem`.
601 592
602 593 """
603 594 return self._map.debug_iter(all)
604 595
605 596 def clear(self):
606 597 self._map.clear()
607 598 self.setparents(
608 599 self._nodeconstants.nullid, self._nodeconstants.nullid
609 600 )
610 601 util.clearcachedproperty(self, b"_dirs")
611 602 util.clearcachedproperty(self, b"_alldirs")
612 603 util.clearcachedproperty(self, b"dirfoldmap")
613 604
614 605 def items(self):
615 606 return self._map.items()
616 607
617 608 # forward for python2,3 compat
618 609 iteritems = items
619 610
620 611 def keys(self):
621 612 return iter(self._map)
622 613
623 614 ### reading/setting parents
624 615
625 616 def setparents(self, p1, p2, fold_p2=False):
626 617 self._parents = (p1, p2)
627 618 self._dirtyparents = True
628 619 copies = {}
629 620 if fold_p2:
630 621 # Collect into an intermediate list to avoid a `RuntimeError`
631 622 # exception due to mutation during iteration.
632 623 # TODO: move this whole loop to Rust, where `iter_mut`
633 624 # enables in-place mutation of elements of a collection while
634 625 # iterating it, without mutating the collection itself.
635 626 files_with_p2_info = [
636 627 f for f, s in self._map.items() if s.p2_info
637 628 ]
638 629 rust_map = self._map
639 630 for f in files_with_p2_info:
640 631 e = rust_map.get(f)
641 632 source = self.copymap.pop(f, None)
642 633 if source:
643 634 copies[f] = source
644 635 e.drop_merge_data()
645 636 rust_map.set_dirstate_item(f, e)
646 637 return copies
647 638
648 639 ### disk interaction
649 640
650 641 @propertycache
651 642 def identity(self):
652 643 self._map
653 644 return self.identity
654 645
655 646 def write(self, tr, st):
656 647 if not self._use_dirstate_v2:
657 648 p1, p2 = self.parents()
658 649 packed = self._map.write_v1(p1, p2)
659 650 st.write(packed)
660 651 st.close()
661 652 self._dirtyparents = False
662 653 return
663 654
664 655 # We can only append to an existing data file if there is one
665 656 can_append = self.docket.uuid is not None
666 657 packed, meta, append = self._map.write_v2(can_append)
667 658 if append:
668 659 docket = self.docket
669 660 data_filename = docket.data_filename()
670 661 if tr:
671 662 tr.add(data_filename, docket.data_size)
672 663 with self._opener(data_filename, b'r+b') as fp:
673 664 fp.seek(docket.data_size)
674 665 assert fp.tell() == docket.data_size
675 666 written = fp.write(packed)
676 667 if written is not None: # py2 may return None
677 668 assert written == len(packed), (written, len(packed))
678 669 docket.data_size += len(packed)
679 670 docket.parents = self.parents()
680 671 docket.tree_metadata = meta
681 672 st.write(docket.serialize())
682 673 st.close()
683 674 else:
684 675 self.write_v2_no_append(tr, st, meta, packed)
685 676 # Reload from the newly-written file
686 677 util.clearcachedproperty(self, b"_map")
687 678 self._dirtyparents = False
688 679
689 680 ### code related to maintaining and accessing "extra" property
690 681 # (e.g. "has_dir")
691 682
692 683 @propertycache
693 684 def filefoldmap(self):
694 685 """Returns a dictionary mapping normalized case paths to their
695 686 non-normalized versions.
696 687 """
697 688 return self._map.filefoldmapasdict()
698 689
699 690 def hastrackeddir(self, d):
700 691 return self._map.hastrackeddir(d)
701 692
702 693 def hasdir(self, d):
703 694 return self._map.hasdir(d)
704 695
705 696 @propertycache
706 697 def dirfoldmap(self):
707 698 f = {}
708 699 normcase = util.normcase
709 700 for name in self._map.tracked_dirs():
710 701 f[normcase(name)] = name
711 702 return f
712 703
713 704 ### code related to manipulation of entries and copy-sources
714 705
715 706 def _refresh_entry(self, f, entry):
716 707 if not entry.any_tracked:
717 708 self._map.drop_item_and_copy_source(f)
718 709 else:
719 710 self._map.addfile(f, entry)
720 711
721 def _insert_entry(self, f, entry):
722 self._map.addfile(f, entry)
723
724 712 def set_tracked(self, f):
725 713 return self._map.set_tracked(f)
726 714
727 715 def reset_state(
728 716 self,
729 717 filename,
730 718 wc_tracked=False,
731 719 p1_tracked=False,
732 720 p2_info=False,
733 721 has_meaningful_mtime=True,
734 722 parentfiledata=None,
735 723 ):
736 724 return self._map.reset_state(
737 725 filename,
738 726 wc_tracked,
739 727 p1_tracked,
740 728 p2_info,
741 729 has_meaningful_mtime,
742 730 parentfiledata,
743 731 )
744 732
745 733 def _drop_entry(self, f):
746 734 self._map.drop_item_and_copy_source(f)
747 735
748 736 def __setitem__(self, key, value):
749 737 assert isinstance(value, DirstateItem)
750 738 self._map.set_dirstate_item(key, value)