dirstate: remove a update_file's special case for tracked file with p2 data...
marmoute
r48920:d4e715d2 default
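The behavioral core of this change is in the `recordupdates` hunk of the second file below: for `ACTION_GET` during a branch merge, the `update_file` arguments are now derived from the pre-existing dirstate entry instead of always passing `clean_p2=True`. A minimal, self-contained sketch of that derivation (plain Python, not the Mercurial API; `any_tracked` and `added` mirror the `DirstateItem` attributes used in the hunk):

    # Toy illustration of the new ACTION_GET argument derivation.
    def get_update_file_args(any_tracked, added):
        p1_tracked = any_tracked and not added
        return {
            'p1_tracked': p1_tracked,
            'p2_tracked': True,
            'wc_tracked': True,
            'clean_p2': not p1_tracked,   # file comes purely from p2
            'merged': p1_tracked,         # tracked on both sides: record as merged
        }

    # Untracked (or added-only) in p1: behaves like the old unconditional clean_p2=True call.
    assert get_update_file_args(any_tracked=False, added=False)['clean_p2'] is True
    # Tracked in p1: now recorded as merged instead of clean_p2.
    assert get_update_file_args(any_tracked=True, added=False)['merged'] is True

With the caller passing this richer information, the dirstatemap no longer needs its special case for a working-copy file that is tracked in p1 but flagged `clean_p2`, which is the block removed from dirstatemap.py below.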
@@ -1,926 +1,917 b''
1 1 # dirstatemap.py
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import errno
9 9
10 10 from .i18n import _
11 11
12 12 from . import (
13 13 error,
14 14 pathutil,
15 15 policy,
16 16 pycompat,
17 17 txnutil,
18 18 util,
19 19 )
20 20
21 21 from .dirstateutils import (
22 22 docket as docketmod,
23 23 )
24 24
25 25 parsers = policy.importmod('parsers')
26 26 rustmod = policy.importrust('dirstate')
27 27
28 28 propertycache = util.propertycache
29 29
30 30 if rustmod is None:
31 31 DirstateItem = parsers.DirstateItem
32 32 else:
33 33 DirstateItem = rustmod.DirstateItem
34 34
35 35 rangemask = 0x7FFFFFFF
36 36
37 37
38 38 class dirstatemap(object):
39 39 """Map encapsulating the dirstate's contents.
40 40
41 41 The dirstate contains the following state:
42 42
43 43 - `identity` is the identity of the dirstate file, which can be used to
44 44 detect when changes have occurred to the dirstate file.
45 45
46 46 - `parents` is a pair containing the parents of the working copy. The
47 47 parents are updated by calling `setparents`.
48 48
49 49 - the state map maps filenames to tuples of (state, mode, size, mtime),
50 50 where state is a single character representing 'normal', 'added',
51 51 'removed', or 'merged'. It is read by treating the dirstate as a
52 52 dict. File state is updated by calling various methods (see each
53 53 documentation for details):
54 54
55 55 - `reset_state`,
56 56 - `set_tracked`
57 57 - `set_untracked`
58 58 - `set_clean`
59 59 - `set_possibly_dirty`
60 60
61 61 - `copymap` maps destination filenames to their source filename.
62 62
63 63 The dirstate also provides the following views onto the state:
64 64
65 65 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
66 66 form that they appear as in the dirstate.
67 67
68 68 - `dirfoldmap` is a dict mapping normalized directory names to the
69 69 denormalized form that they appear as in the dirstate.
70 70 """
71 71
72 72 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
73 73 self._ui = ui
74 74 self._opener = opener
75 75 self._root = root
76 76 self._filename = b'dirstate'
77 77 self._nodelen = 20
78 78 self._nodeconstants = nodeconstants
79 79 assert (
80 80 not use_dirstate_v2
81 81 ), "should have detected unsupported requirement"
82 82
83 83 self._parents = None
84 84 self._dirtyparents = False
85 85
86 86 # for consistent view between _pl() and _read() invocations
87 87 self._pendingmode = None
88 88
89 89 @propertycache
90 90 def _map(self):
91 91 self._map = {}
92 92 self.read()
93 93 return self._map
94 94
95 95 @propertycache
96 96 def copymap(self):
97 97 self.copymap = {}
98 98 self._map
99 99 return self.copymap
100 100
101 101 def clear(self):
102 102 self._map.clear()
103 103 self.copymap.clear()
104 104 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
105 105 util.clearcachedproperty(self, b"_dirs")
106 106 util.clearcachedproperty(self, b"_alldirs")
107 107 util.clearcachedproperty(self, b"filefoldmap")
108 108 util.clearcachedproperty(self, b"dirfoldmap")
109 109
110 110 def items(self):
111 111 return pycompat.iteritems(self._map)
112 112
113 113 # forward for python2,3 compat
114 114 iteritems = items
115 115
116 116 def debug_iter(self, all):
117 117 """
118 118 Return an iterator of (filename, state, mode, size, mtime) tuples
119 119
120 120 `all` is unused when Rust is not enabled
121 121 """
122 122 for (filename, item) in self.items():
123 123 yield (filename, item.state, item.mode, item.size, item.mtime)
124 124
125 125 def __len__(self):
126 126 return len(self._map)
127 127
128 128 def __iter__(self):
129 129 return iter(self._map)
130 130
131 131 def get(self, key, default=None):
132 132 return self._map.get(key, default)
133 133
134 134 def __contains__(self, key):
135 135 return key in self._map
136 136
137 137 def __getitem__(self, key):
138 138 return self._map[key]
139 139
140 140 def keys(self):
141 141 return self._map.keys()
142 142
143 143 def preload(self):
144 144 """Loads the underlying data, if it's not already loaded"""
145 145 self._map
146 146
147 147 def _dirs_incr(self, filename, old_entry=None):
148 148 """increment the dirstate counter if applicable"""
149 149 if (
150 150 old_entry is None or old_entry.removed
151 151 ) and "_dirs" in self.__dict__:
152 152 self._dirs.addpath(filename)
153 153 if old_entry is None and "_alldirs" in self.__dict__:
154 154 self._alldirs.addpath(filename)
155 155
156 156 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
157 157 """decrement the dirstate counter if applicable"""
158 158 if old_entry is not None:
159 159 if "_dirs" in self.__dict__ and not old_entry.removed:
160 160 self._dirs.delpath(filename)
161 161 if "_alldirs" in self.__dict__ and not remove_variant:
162 162 self._alldirs.delpath(filename)
163 163 elif remove_variant and "_alldirs" in self.__dict__:
164 164 self._alldirs.addpath(filename)
165 165 if "filefoldmap" in self.__dict__:
166 166 normed = util.normcase(filename)
167 167 self.filefoldmap.pop(normed, None)
168 168
169 169 def set_possibly_dirty(self, filename):
170 170 """record that the current state of the file on disk is unknown"""
171 171 self[filename].set_possibly_dirty()
172 172
173 173 def set_clean(self, filename, mode, size, mtime):
174 174 """mark a file as back to a clean state"""
175 175 entry = self[filename]
176 176 mtime = mtime & rangemask
177 177 size = size & rangemask
178 178 entry.set_clean(mode, size, mtime)
179 179 self.copymap.pop(filename, None)
180 180
181 181 def reset_state(
182 182 self,
183 183 filename,
184 184 wc_tracked=False,
185 185 p1_tracked=False,
186 186 p2_tracked=False,
187 187 merged=False,
188 188 clean_p1=False,
189 189 clean_p2=False,
190 190 possibly_dirty=False,
191 191 parentfiledata=None,
192 192 ):
193 193 """Set an entry to a given state, disregarding all previous state
194 194
195 195 This is to be used by the part of the dirstate API dedicated to
196 196 adjusting the dirstate after an update/merge.
197 197
198 198 note: calling this might result in no entry existing at all if the
199 199 dirstate map does not see any point in having one for this file
200 200 anymore.
201 201 """
202 202 if merged and (clean_p1 or clean_p2):
203 203 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
204 204 raise error.ProgrammingError(msg)
205 205 # copy information is now outdated
206 206 # (maybe new information should be directly passed to this function)
207 207 self.copymap.pop(filename, None)
208 208
209 209 if not (p1_tracked or p2_tracked or wc_tracked):
210 210 old_entry = self._map.pop(filename, None)
211 211 self._dirs_decr(filename, old_entry=old_entry)
212 212 self.copymap.pop(filename, None)
213 213 return
214 214 elif merged:
215 215 # XXX might be merged and removed ?
216 216 entry = self.get(filename)
217 217 if entry is None or not entry.tracked:
218 218 # XXX mostly replicate dirstate.other parent. We should get
219 219 # the higher layer to pass us more reliable data where `merged`
220 220 # actually means merged. Dropping this clause will show failure
221 221 # in `test-graft.t`
222 222 merged = False
223 223 clean_p2 = True
224 224 elif not (p1_tracked or p2_tracked) and wc_tracked:
225 225 pass # file is added, nothing special to adjust
226 226 elif (p1_tracked or p2_tracked) and not wc_tracked:
227 227 pass
228 228 elif clean_p2 and wc_tracked:
229 if p1_tracked or self.get(filename) is not None:
230 # XXX the `self.get` call is catching some case in
231 # `test-merge-remove.t` where the file is tracked in p1, the
232 # p1_tracked argument is False.
233 #
234 # In addition, this seems to be a case where the file is marked
235 # as merged without actually being the result of a merge
236 # action. So thing are not ideal here.
237 merged = True
238 clean_p2 = False
229 pass
239 230 elif not p1_tracked and p2_tracked and wc_tracked:
240 231 clean_p2 = True
241 232 elif possibly_dirty:
242 233 pass
243 234 elif wc_tracked:
244 235 # this is a "normal" file
245 236 if parentfiledata is None:
246 237 msg = b'failed to pass parentfiledata for a normal file: %s'
247 238 msg %= filename
248 239 raise error.ProgrammingError(msg)
249 240 else:
250 241 assert False, 'unreachable'
251 242
252 243 old_entry = self._map.get(filename)
253 244 self._dirs_incr(filename, old_entry)
254 245 entry = DirstateItem(
255 246 wc_tracked=wc_tracked,
256 247 p1_tracked=p1_tracked,
257 248 p2_tracked=p2_tracked,
258 249 merged=merged,
259 250 clean_p1=clean_p1,
260 251 clean_p2=clean_p2,
261 252 possibly_dirty=possibly_dirty,
262 253 parentfiledata=parentfiledata,
263 254 )
264 255 self._map[filename] = entry
265 256
266 257 def set_tracked(self, filename):
267 258 new = False
268 259 entry = self.get(filename)
269 260 if entry is None:
270 261 self._dirs_incr(filename)
271 262 entry = DirstateItem(
272 263 p1_tracked=False,
273 264 p2_tracked=False,
274 265 wc_tracked=True,
275 266 merged=False,
276 267 clean_p1=False,
277 268 clean_p2=False,
278 269 possibly_dirty=False,
279 270 parentfiledata=None,
280 271 )
281 272 self._map[filename] = entry
282 273 new = True
283 274 elif not entry.tracked:
284 275 self._dirs_incr(filename, entry)
285 276 entry.set_tracked()
286 277 new = True
287 278 else:
288 279 # XXX This is probably overkill for most cases, but we need this to
289 280 # fully replace the `normallookup` call with the `set_tracked` one.
290 281 # Consider smoothing this in the future.
291 282 self.set_possibly_dirty(filename)
292 283 return new
293 284
294 285 def set_untracked(self, f):
295 286 """Mark a file as no longer tracked in the dirstate map"""
296 287 entry = self.get(f)
297 288 if entry is None:
298 289 return False
299 290 else:
300 291 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
301 292 if not entry.merged:
302 293 self.copymap.pop(f, None)
303 294 if entry.added:
304 295 self._map.pop(f, None)
305 296 else:
306 297 entry.set_untracked()
307 298 return True
308 299
309 300 @propertycache
310 301 def filefoldmap(self):
311 302 """Returns a dictionary mapping normalized case paths to their
312 303 non-normalized versions.
313 304 """
314 305 try:
315 306 makefilefoldmap = parsers.make_file_foldmap
316 307 except AttributeError:
317 308 pass
318 309 else:
319 310 return makefilefoldmap(
320 311 self._map, util.normcasespec, util.normcasefallback
321 312 )
322 313
323 314 f = {}
324 315 normcase = util.normcase
325 316 for name, s in pycompat.iteritems(self._map):
326 317 if not s.removed:
327 318 f[normcase(name)] = name
328 319 f[b'.'] = b'.' # prevents useless util.fspath() invocation
329 320 return f
330 321
331 322 def hastrackeddir(self, d):
332 323 """
333 324 Returns True if the dirstate contains a tracked (not removed) file
334 325 in this directory.
335 326 """
336 327 return d in self._dirs
337 328
338 329 def hasdir(self, d):
339 330 """
340 331 Returns True if the dirstate contains a file (tracked or removed)
341 332 in this directory.
342 333 """
343 334 return d in self._alldirs
344 335
345 336 @propertycache
346 337 def _dirs(self):
347 338 return pathutil.dirs(self._map, only_tracked=True)
348 339
349 340 @propertycache
350 341 def _alldirs(self):
351 342 return pathutil.dirs(self._map)
352 343
353 344 def _opendirstatefile(self):
354 345 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
355 346 if self._pendingmode is not None and self._pendingmode != mode:
356 347 fp.close()
357 348 raise error.Abort(
358 349 _(b'working directory state may be changed parallelly')
359 350 )
360 351 self._pendingmode = mode
361 352 return fp
362 353
363 354 def parents(self):
364 355 if not self._parents:
365 356 try:
366 357 fp = self._opendirstatefile()
367 358 st = fp.read(2 * self._nodelen)
368 359 fp.close()
369 360 except IOError as err:
370 361 if err.errno != errno.ENOENT:
371 362 raise
372 363 # File doesn't exist, so the current state is empty
373 364 st = b''
374 365
375 366 l = len(st)
376 367 if l == self._nodelen * 2:
377 368 self._parents = (
378 369 st[: self._nodelen],
379 370 st[self._nodelen : 2 * self._nodelen],
380 371 )
381 372 elif l == 0:
382 373 self._parents = (
383 374 self._nodeconstants.nullid,
384 375 self._nodeconstants.nullid,
385 376 )
386 377 else:
387 378 raise error.Abort(
388 379 _(b'working directory state appears damaged!')
389 380 )
390 381
391 382 return self._parents
392 383
393 384 def setparents(self, p1, p2, fold_p2=False):
394 385 self._parents = (p1, p2)
395 386 self._dirtyparents = True
396 387 copies = {}
397 388 if fold_p2:
398 389 for f, s in pycompat.iteritems(self._map):
399 390 # Discard "merged" markers when moving away from a merge state
400 391 if s.merged or s.from_p2:
401 392 source = self.copymap.pop(f, None)
402 393 if source:
403 394 copies[f] = source
404 395 s.drop_merge_data()
405 396 return copies
406 397
407 398 def read(self):
408 399 # ignore HG_PENDING because identity is used only for writing
409 400 self.identity = util.filestat.frompath(
410 401 self._opener.join(self._filename)
411 402 )
412 403
413 404 try:
414 405 fp = self._opendirstatefile()
415 406 try:
416 407 st = fp.read()
417 408 finally:
418 409 fp.close()
419 410 except IOError as err:
420 411 if err.errno != errno.ENOENT:
421 412 raise
422 413 return
423 414 if not st:
424 415 return
425 416
426 417 if util.safehasattr(parsers, b'dict_new_presized'):
427 418 # Make an estimate of the number of files in the dirstate based on
428 419 # its size. This trades wasting some memory for avoiding costly
429 420 # resizes. Each entry has a prefix of 17 bytes followed by one or
430 421 # two path names. Studies on various large-scale real-world repositories
431 422 # found 54 bytes to be a reasonable upper limit for the average path name.
432 423 # Copy entries are ignored for the sake of this estimate.
433 424 self._map = parsers.dict_new_presized(len(st) // 71)
434 425
435 426 # Python's garbage collector triggers a GC each time a certain number
436 427 # of container objects (the number being defined by
437 428 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
438 429 # for each file in the dirstate. The C version then immediately marks
439 430 # them as not to be tracked by the collector. However, this has no
440 431 # effect on when GCs are triggered, only on what objects the GC looks
441 432 # into. This means that O(number of files) GCs are unavoidable.
442 433 # Depending on when in the process's lifetime the dirstate is parsed,
443 434 # this can get very expensive. As a workaround, disable GC while
444 435 # parsing the dirstate.
445 436 #
446 437 # (we cannot decorate the function directly since it is in a C module)
447 438 parse_dirstate = util.nogc(parsers.parse_dirstate)
448 439 p = parse_dirstate(self._map, self.copymap, st)
449 440 if not self._dirtyparents:
450 441 self.setparents(*p)
451 442
452 443 # Avoid excess attribute lookups by fast pathing certain checks
453 444 self.__contains__ = self._map.__contains__
454 445 self.__getitem__ = self._map.__getitem__
455 446 self.get = self._map.get
456 447
457 448 def write(self, _tr, st, now):
458 449 st.write(
459 450 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
460 451 )
461 452 st.close()
462 453 self._dirtyparents = False
463 454
464 455 @propertycache
465 456 def identity(self):
466 457 self._map
467 458 return self.identity
468 459
469 460 @propertycache
470 461 def dirfoldmap(self):
471 462 f = {}
472 463 normcase = util.normcase
473 464 for name in self._dirs:
474 465 f[normcase(name)] = name
475 466 return f
476 467
477 468
478 469 if rustmod is not None:
479 470
480 471 class dirstatemap(object):
481 472 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
482 473 self._use_dirstate_v2 = use_dirstate_v2
483 474 self._nodeconstants = nodeconstants
484 475 self._ui = ui
485 476 self._opener = opener
486 477 self._root = root
487 478 self._filename = b'dirstate'
488 479 self._nodelen = 20 # Also update Rust code when changing this!
489 480 self._parents = None
490 481 self._dirtyparents = False
491 482 self._docket = None
492 483
493 484 # for consistent view between _pl() and _read() invocations
494 485 self._pendingmode = None
495 486
496 487 def addfile(
497 488 self,
498 489 f,
499 490 mode=0,
500 491 size=None,
501 492 mtime=None,
502 493 added=False,
503 494 merged=False,
504 495 from_p2=False,
505 496 possibly_dirty=False,
506 497 ):
507 498 if added:
508 499 assert not possibly_dirty
509 500 assert not from_p2
510 501 item = DirstateItem.new_added()
511 502 elif merged:
512 503 assert not possibly_dirty
513 504 assert not from_p2
514 505 item = DirstateItem.new_merged()
515 506 elif from_p2:
516 507 assert not possibly_dirty
517 508 item = DirstateItem.new_from_p2()
518 509 elif possibly_dirty:
519 510 item = DirstateItem.new_possibly_dirty()
520 511 else:
521 512 assert size is not None
522 513 assert mtime is not None
523 514 size = size & rangemask
524 515 mtime = mtime & rangemask
525 516 item = DirstateItem.new_normal(mode, size, mtime)
526 517 self._rustmap.addfile(f, item)
527 518 if added:
528 519 self.copymap.pop(f, None)
529 520
530 521 def reset_state(
531 522 self,
532 523 filename,
533 524 wc_tracked=False,
534 525 p1_tracked=False,
535 526 p2_tracked=False,
536 527 merged=False,
537 528 clean_p1=False,
538 529 clean_p2=False,
539 530 possibly_dirty=False,
540 531 parentfiledata=None,
541 532 ):
542 533 """Set an entry to a given state, disregarding all previous state
543 534
544 535 This is to be used by the part of the dirstate API dedicated to
545 536 adjusting the dirstate after an update/merge.
546 537
547 538 note: calling this might result in no entry existing at all if the
548 539 dirstate map does not see any point in having one for this file
549 540 anymore.
550 541 """
551 542 if merged and (clean_p1 or clean_p2):
552 543 msg = (
553 544 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
554 545 )
555 546 raise error.ProgrammingError(msg)
556 547 # copy information are now outdated
557 548 # (maybe new information should be in directly passed to this function)
558 549 self.copymap.pop(filename, None)
559 550
560 551 if not (p1_tracked or p2_tracked or wc_tracked):
561 552 self._rustmap.drop_item_and_copy_source(filename)
562 553 elif merged:
563 554 # XXX might be merged and removed ?
564 555 entry = self.get(filename)
565 556 if entry is not None and entry.tracked:
566 557 # XXX mostly replicate dirstate.other parent. We should get
567 558 # the higher layer to pass us more reliable data where `merged`
568 559 # actually means merged. Dropping the else clause will show
569 560 # failure in `test-graft.t`
570 561 self.addfile(filename, merged=True)
571 562 else:
572 563 self.addfile(filename, from_p2=True)
573 564 elif not (p1_tracked or p2_tracked) and wc_tracked:
574 565 self.addfile(
575 566 filename, added=True, possibly_dirty=possibly_dirty
576 567 )
577 568 elif (p1_tracked or p2_tracked) and not wc_tracked:
578 569 # XXX might be merged and removed ?
579 570 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
580 571 elif clean_p2 and wc_tracked:
581 572 if p1_tracked or self.get(filename) is not None:
582 573 # XXX the `self.get` call is catching a case in
583 574 # `test-merge-remove.t` where the file is tracked in p1 but the
584 575 # p1_tracked argument is False.
585 576 #
586 577 # In addition, this seems to be a case where the file is marked
587 578 # as merged without actually being the result of a merge
588 579 # action. So things are not ideal here.
589 580 self.addfile(filename, merged=True)
590 581 else:
591 582 self.addfile(filename, from_p2=True)
592 583 elif not p1_tracked and p2_tracked and wc_tracked:
593 584 self.addfile(
594 585 filename, from_p2=True, possibly_dirty=possibly_dirty
595 586 )
596 587 elif possibly_dirty:
597 588 self.addfile(filename, possibly_dirty=possibly_dirty)
598 589 elif wc_tracked:
599 590 # this is a "normal" file
600 591 if parentfiledata is None:
601 592 msg = b'failed to pass parentfiledata for a normal file: %s'
602 593 msg %= filename
603 594 raise error.ProgrammingError(msg)
604 595 mode, size, mtime = parentfiledata
605 596 self.addfile(filename, mode=mode, size=size, mtime=mtime)
606 597 else:
607 598 assert False, 'unreachable'
608 599
609 600 def set_tracked(self, filename):
610 601 new = False
611 602 entry = self.get(filename)
612 603 if entry is None:
613 604 self.addfile(filename, added=True)
614 605 new = True
615 606 elif not entry.tracked:
616 607 entry.set_tracked()
617 608 self._rustmap.set_dirstate_item(filename, entry)
618 609 new = True
619 610 else:
620 611 # XXX This is probably overkill for most cases, but we need this to
621 612 # fully replace the `normallookup` call with the `set_tracked` one.
622 613 # Consider smoothing this in the future.
623 614 self.set_possibly_dirty(filename)
624 615 return new
625 616
626 617 def set_untracked(self, f):
627 618 """Mark a file as no longer tracked in the dirstate map"""
628 619 # being in a merge only triggers more logic, so it is "fine" to pass it.
629 620 #
630 621 # the inner rust dirstate map code needs to be adjusted once the API
631 622 # for dirstate/dirstatemap/DirstateItem is a bit more settled
632 623 entry = self.get(f)
633 624 if entry is None:
634 625 return False
635 626 else:
636 627 if entry.added:
637 628 self._rustmap.drop_item_and_copy_source(f)
638 629 else:
639 630 self._rustmap.removefile(f, in_merge=True)
640 631 return True
641 632
642 633 def removefile(self, *args, **kwargs):
643 634 return self._rustmap.removefile(*args, **kwargs)
644 635
645 636 def get(self, *args, **kwargs):
646 637 return self._rustmap.get(*args, **kwargs)
647 638
648 639 @property
649 640 def copymap(self):
650 641 return self._rustmap.copymap()
651 642
652 643 def debug_iter(self, all):
653 644 """
654 645 Return an iterator of (filename, state, mode, size, mtime) tuples
655 646
656 647 `all`: also include dirstate tree nodes with `state == b' '` that
657 648 don't have an associated `DirstateItem`.
658 649
659 650 """
660 651 return self._rustmap.debug_iter(all)
661 652
662 653 def preload(self):
663 654 self._rustmap
664 655
665 656 def clear(self):
666 657 self._rustmap.clear()
667 658 self.setparents(
668 659 self._nodeconstants.nullid, self._nodeconstants.nullid
669 660 )
670 661 util.clearcachedproperty(self, b"_dirs")
671 662 util.clearcachedproperty(self, b"_alldirs")
672 663 util.clearcachedproperty(self, b"dirfoldmap")
673 664
674 665 def items(self):
675 666 return self._rustmap.items()
676 667
677 668 def keys(self):
678 669 return iter(self._rustmap)
679 670
680 671 def __contains__(self, key):
681 672 return key in self._rustmap
682 673
683 674 def __getitem__(self, item):
684 675 return self._rustmap[item]
685 676
686 677 def __len__(self):
687 678 return len(self._rustmap)
688 679
689 680 def __iter__(self):
690 681 return iter(self._rustmap)
691 682
692 683 # forward for python2,3 compat
693 684 iteritems = items
694 685
695 686 def _opendirstatefile(self):
696 687 fp, mode = txnutil.trypending(
697 688 self._root, self._opener, self._filename
698 689 )
699 690 if self._pendingmode is not None and self._pendingmode != mode:
700 691 fp.close()
701 692 raise error.Abort(
702 693 _(b'working directory state may be changed parallelly')
703 694 )
704 695 self._pendingmode = mode
705 696 return fp
706 697
707 698 def _readdirstatefile(self, size=-1):
708 699 try:
709 700 with self._opendirstatefile() as fp:
710 701 return fp.read(size)
711 702 except IOError as err:
712 703 if err.errno != errno.ENOENT:
713 704 raise
714 705 # File doesn't exist, so the current state is empty
715 706 return b''
716 707
717 708 def setparents(self, p1, p2, fold_p2=False):
718 709 self._parents = (p1, p2)
719 710 self._dirtyparents = True
720 711 copies = {}
721 712 if fold_p2:
722 713 # Collect into an intermediate list to avoid a `RuntimeError`
723 714 # exception due to mutation during iteration.
724 715 # TODO: move this whole loop to Rust where `iter_mut`
725 716 # enables in-place mutation of elements of a collection while
726 717 # iterating it, without mutating the collection itself.
727 718 candidatefiles = [
728 719 (f, s)
729 720 for f, s in self._rustmap.items()
730 721 if s.merged or s.from_p2
731 722 ]
732 723 for f, s in candidatefiles:
733 724 # Discard "merged" markers when moving away from a merge state
734 725 if s.merged:
735 726 source = self.copymap.get(f)
736 727 if source:
737 728 copies[f] = source
738 729 self.reset_state(
739 730 f,
740 731 wc_tracked=True,
741 732 p1_tracked=True,
742 733 possibly_dirty=True,
743 734 )
744 735 # Also fix up otherparent markers
745 736 elif s.from_p2:
746 737 source = self.copymap.get(f)
747 738 if source:
748 739 copies[f] = source
749 740 self.reset_state(
750 741 f,
751 742 p1_tracked=False,
752 743 wc_tracked=True,
753 744 )
754 745 return copies
755 746
756 747 def parents(self):
757 748 if not self._parents:
758 749 if self._use_dirstate_v2:
759 750 self._parents = self.docket.parents
760 751 else:
761 752 read_len = self._nodelen * 2
762 753 st = self._readdirstatefile(read_len)
763 754 l = len(st)
764 755 if l == read_len:
765 756 self._parents = (
766 757 st[: self._nodelen],
767 758 st[self._nodelen : 2 * self._nodelen],
768 759 )
769 760 elif l == 0:
770 761 self._parents = (
771 762 self._nodeconstants.nullid,
772 763 self._nodeconstants.nullid,
773 764 )
774 765 else:
775 766 raise error.Abort(
776 767 _(b'working directory state appears damaged!')
777 768 )
778 769
779 770 return self._parents
780 771
781 772 @property
782 773 def docket(self):
783 774 if not self._docket:
784 775 if not self._use_dirstate_v2:
785 776 raise error.ProgrammingError(
786 777 b'dirstate only has a docket in v2 format'
787 778 )
788 779 self._docket = docketmod.DirstateDocket.parse(
789 780 self._readdirstatefile(), self._nodeconstants
790 781 )
791 782 return self._docket
792 783
793 784 @propertycache
794 785 def _rustmap(self):
795 786 """
796 787 Fills the Dirstatemap when called.
797 788 """
798 789 # ignore HG_PENDING because identity is used only for writing
799 790 self.identity = util.filestat.frompath(
800 791 self._opener.join(self._filename)
801 792 )
802 793
803 794 if self._use_dirstate_v2:
804 795 if self.docket.uuid:
805 796 # TODO: use mmap when possible
806 797 data = self._opener.read(self.docket.data_filename())
807 798 else:
808 799 data = b''
809 800 self._rustmap = rustmod.DirstateMap.new_v2(
810 801 data, self.docket.data_size, self.docket.tree_metadata
811 802 )
812 803 parents = self.docket.parents
813 804 else:
814 805 self._rustmap, parents = rustmod.DirstateMap.new_v1(
815 806 self._readdirstatefile()
816 807 )
817 808
818 809 if parents and not self._dirtyparents:
819 810 self.setparents(*parents)
820 811
821 812 self.__contains__ = self._rustmap.__contains__
822 813 self.__getitem__ = self._rustmap.__getitem__
823 814 self.get = self._rustmap.get
824 815 return self._rustmap
825 816
826 817 def write(self, tr, st, now):
827 818 if not self._use_dirstate_v2:
828 819 p1, p2 = self.parents()
829 820 packed = self._rustmap.write_v1(p1, p2, now)
830 821 st.write(packed)
831 822 st.close()
832 823 self._dirtyparents = False
833 824 return
834 825
835 826 # We can only append to an existing data file if there is one
836 827 can_append = self.docket.uuid is not None
837 828 packed, meta, append = self._rustmap.write_v2(now, can_append)
838 829 if append:
839 830 docket = self.docket
840 831 data_filename = docket.data_filename()
841 832 if tr:
842 833 tr.add(data_filename, docket.data_size)
843 834 with self._opener(data_filename, b'r+b') as fp:
844 835 fp.seek(docket.data_size)
845 836 assert fp.tell() == docket.data_size
846 837 written = fp.write(packed)
847 838 if written is not None: # py2 may return None
848 839 assert written == len(packed), (written, len(packed))
849 840 docket.data_size += len(packed)
850 841 docket.parents = self.parents()
851 842 docket.tree_metadata = meta
852 843 st.write(docket.serialize())
853 844 st.close()
854 845 else:
855 846 old_docket = self.docket
856 847 new_docket = docketmod.DirstateDocket.with_new_uuid(
857 848 self.parents(), len(packed), meta
858 849 )
859 850 data_filename = new_docket.data_filename()
860 851 if tr:
861 852 tr.add(data_filename, 0)
862 853 self._opener.write(data_filename, packed)
863 854 # Write the new docket after the new data file has been
864 855 # written. Because `st` was opened with `atomictemp=True`,
865 856 # the actual `.hg/dirstate` file is only affected on close.
866 857 st.write(new_docket.serialize())
867 858 st.close()
868 859 # Remove the old data file after the new docket pointing to
869 860 # the new data file was written.
870 861 if old_docket.uuid:
871 862 data_filename = old_docket.data_filename()
872 863 unlink = lambda _tr=None: self._opener.unlink(data_filename)
873 864 if tr:
874 865 category = b"dirstate-v2-clean-" + old_docket.uuid
875 866 tr.addpostclose(category, unlink)
876 867 else:
877 868 unlink()
878 869 self._docket = new_docket
879 870 # Reload from the newly-written file
880 871 util.clearcachedproperty(self, b"_rustmap")
881 872 self._dirtyparents = False
882 873
883 874 @propertycache
884 875 def filefoldmap(self):
885 876 """Returns a dictionary mapping normalized case paths to their
886 877 non-normalized versions.
887 878 """
888 879 return self._rustmap.filefoldmapasdict()
889 880
890 881 def hastrackeddir(self, d):
891 882 return self._rustmap.hastrackeddir(d)
892 883
893 884 def hasdir(self, d):
894 885 return self._rustmap.hasdir(d)
895 886
896 887 @propertycache
897 888 def identity(self):
898 889 self._rustmap
899 890 return self.identity
900 891
901 892 @propertycache
902 893 def dirfoldmap(self):
903 894 f = {}
904 895 normcase = util.normcase
905 896 for name in self._rustmap.tracked_dirs():
906 897 f[normcase(name)] = name
907 898 return f
908 899
909 900 def set_possibly_dirty(self, filename):
910 901 """record that the current state of the file on disk is unknown"""
911 902 entry = self[filename]
912 903 entry.set_possibly_dirty()
913 904 self._rustmap.set_dirstate_item(filename, entry)
914 905
915 906 def set_clean(self, filename, mode, size, mtime):
916 907 """mark a file as back to a clean state"""
917 908 entry = self[filename]
918 909 mtime = mtime & rangemask
919 910 size = size & rangemask
920 911 entry.set_clean(mode, size, mtime)
921 912 self._rustmap.set_dirstate_item(filename, entry)
922 913 self._rustmap.copymap().pop(filename, None)
923 914
924 915 def __setitem__(self, key, value):
925 916 assert isinstance(value, DirstateItem)
926 917 self._rustmap.set_dirstate_item(key, value)
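With the Python-only special case above removed, a file taken cleanly from p2 now flows through `reset_state` straight to the generic `DirstateItem` construction. A rough sketch of such a call and the entry it produces, where `dmap` and the path are purely illustrative and assume a loaded `dirstatemap` instance:

    # Hypothetical call; `dmap` and the path are illustrative only.
    dmap.reset_state(
        b'some/file',
        wc_tracked=True,
        p2_tracked=True,
        clean_p2=True,
    )
    # The `clean_p2 and wc_tracked` branch is now a no-op, so the entry is built as:
    # DirstateItem(wc_tracked=True, p1_tracked=False, p2_tracked=True,
    #              merged=False, clean_p1=False, clean_p2=True,
    #              possibly_dirty=False, parentfiledata=None)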
@@ -1,866 +1,869 b''
1 1 from __future__ import absolute_import
2 2
3 3 import collections
4 4 import errno
5 5 import shutil
6 6 import struct
7 7
8 8 from .i18n import _
9 9 from .node import (
10 10 bin,
11 11 hex,
12 12 nullrev,
13 13 )
14 14 from . import (
15 15 error,
16 16 filemerge,
17 17 pycompat,
18 18 util,
19 19 )
20 20 from .utils import hashutil
21 21
22 22 _pack = struct.pack
23 23 _unpack = struct.unpack
24 24
25 25
26 26 def _droponode(data):
27 27 # used for compatibility for v1
28 28 bits = data.split(b'\0')
29 29 bits = bits[:-2] + bits[-1:]
30 30 return b'\0'.join(bits)
31 31
32 32
33 33 def _filectxorabsent(hexnode, ctx, f):
34 34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 35 return filemerge.absentfilectx(ctx, f)
36 36 else:
37 37 return ctx[f]
38 38
39 39
40 40 # Merge state record types. See ``mergestate`` docs for more.
41 41
42 42 ####
43 43 # merge records which record metadata about a current merge
44 44 # exist only once in a mergestate
45 45 #####
46 46 RECORD_LOCAL = b'L'
47 47 RECORD_OTHER = b'O'
48 48 # record merge labels
49 49 RECORD_LABELS = b'l'
50 50
51 51 #####
52 52 # record extra information about files, with one entry containing info about one
53 53 # file. Hence, multiple of them can exist
54 54 #####
55 55 RECORD_FILE_VALUES = b'f'
56 56
57 57 #####
58 58 # merge records which represents state of individual merges of files/folders
59 59 # merge records which represent the state of individual merges of files/folders
60 60 # These are top level records for each entry containing merge related info.
61 61 # Each record of these has info about one file. Hence multiple of them can
62 62 # exist
63 63 RECORD_MERGED = b'F'
64 64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 65 # the path was dir on one side of merge and file on another
66 66 RECORD_PATH_CONFLICT = b'P'
67 67
68 68 #####
69 69 # possible state which a merge entry can have. These are stored inside top-level
70 70 # merge records mentioned just above.
71 71 #####
72 72 MERGE_RECORD_UNRESOLVED = b'u'
73 73 MERGE_RECORD_RESOLVED = b'r'
74 74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 76 # represents that the file was automatically merged in favor
77 77 # of other version. This info is used on commit.
78 78 # This is now deprecated and commit related information is now
79 79 # stored in RECORD_FILE_VALUES
80 80 MERGE_RECORD_MERGED_OTHER = b'o'
81 81
82 82 #####
83 83 # top level record which stores other unknown records. Multiple of these can
84 84 # exists
85 85 # exist
86 86 RECORD_OVERRIDE = b't'
87 87
88 88 #####
89 89 # legacy records which are no longer used but kept to prevent breaking BC
90 90 #####
91 91 # This record was released in 5.4 and usage was removed in 5.5
92 92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 93 # This record was released in 3.7 and usage was removed in 5.6
94 94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 95 # This record was released in 3.7 and usage was removed in 5.6
96 96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 97 # This record was released in 3.7 and usage was removed in 5.6
98 98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99 99
100 100
101 101 ACTION_FORGET = b'f'
102 102 ACTION_REMOVE = b'r'
103 103 ACTION_ADD = b'a'
104 104 ACTION_GET = b'g'
105 105 ACTION_PATH_CONFLICT = b'p'
106 106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 107 ACTION_ADD_MODIFIED = b'am'
108 108 ACTION_CREATED = b'c'
109 109 ACTION_DELETED_CHANGED = b'dc'
110 110 ACTION_CHANGED_DELETED = b'cd'
111 111 ACTION_MERGE = b'm'
112 112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 114 ACTION_KEEP = b'k'
115 115 # the file was absent on local side before merge and we should
116 116 # keep it absent (absent means file not present, it can be a result
117 117 # of file deletion, rename etc.)
118 118 ACTION_KEEP_ABSENT = b'ka'
119 119 # the file is absent on the ancestor and remote side of the merge
120 120 # hence this file is new and we should keep it
121 121 ACTION_KEEP_NEW = b'kn'
122 122 ACTION_EXEC = b'e'
123 123 ACTION_CREATED_MERGE = b'cm'
124 124
125 125 # actions which are no op
126 126 NO_OP_ACTIONS = (
127 127 ACTION_KEEP,
128 128 ACTION_KEEP_ABSENT,
129 129 ACTION_KEEP_NEW,
130 130 )
131 131
132 132
133 133 class _mergestate_base(object):
134 134 """track 3-way merge state of individual files
135 135
136 136 The merge state is stored on disk when needed. Two files are used: one with
137 137 an old format (version 1), and one with a new format (version 2). Version 2
138 138 stores a superset of the data in version 1, including new kinds of records
139 139 in the future. For more about the new format, see the documentation for
140 140 `_readrecordsv2`.
141 141
142 142 Each record can contain arbitrary content, and has an associated type. This
143 143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 144 versions of Mercurial that don't support it should abort. If `type` is
145 145 lowercase, the record can be safely ignored.
146 146
147 147 Currently known records:
148 148
149 149 L: the node of the "local" part of the merge (hexified version)
150 150 O: the node of the "other" part of the merge (hexified version)
151 151 F: a file to be merged entry
152 152 C: a change/delete or delete/change conflict
153 153 P: a path conflict (file vs directory)
154 154 f: a (filename, dictionary) tuple of optional values for a given file
155 155 l: the labels for the parts of the merge.
156 156
157 157 Merge record states (stored in self._state, indexed by filename):
158 158 u: unresolved conflict
159 159 r: resolved conflict
160 160 pu: unresolved path conflict (file conflicts with directory)
161 161 pr: resolved path conflict
162 162 o: file was merged in favor of other parent of merge (DEPRECATED)
163 163
164 164 The resolve command transitions between 'u' and 'r' for conflicts and
165 165 'pu' and 'pr' for path conflicts.
166 166 """
167 167
168 168 def __init__(self, repo):
169 169 """Initialize the merge state.
170 170
171 171 Do not use this directly! Instead call read() or clean()."""
172 172 self._repo = repo
173 173 self._state = {}
174 174 self._stateextras = collections.defaultdict(dict)
175 175 self._local = None
176 176 self._other = None
177 177 self._labels = None
178 178 # contains a mapping of form:
179 179 # {filename : (merge_return_value, action_to_be_performed)}
180 180 # these are results of re-running merge process
181 181 # this dict is used to perform actions on dirstate caused by re-running
182 182 # the merge
183 183 self._results = {}
184 184 self._dirty = False
185 185
186 186 def reset(self):
187 187 pass
188 188
189 189 def start(self, node, other, labels=None):
190 190 self._local = node
191 191 self._other = other
192 192 self._labels = labels
193 193
194 194 @util.propertycache
195 195 def local(self):
196 196 if self._local is None:
197 197 msg = b"local accessed but self._local isn't set"
198 198 raise error.ProgrammingError(msg)
199 199 return self._local
200 200
201 201 @util.propertycache
202 202 def localctx(self):
203 203 return self._repo[self.local]
204 204
205 205 @util.propertycache
206 206 def other(self):
207 207 if self._other is None:
208 208 msg = b"other accessed but self._other isn't set"
209 209 raise error.ProgrammingError(msg)
210 210 return self._other
211 211
212 212 @util.propertycache
213 213 def otherctx(self):
214 214 return self._repo[self.other]
215 215
216 216 def active(self):
217 217 """Whether mergestate is active.
218 218
219 219 Returns True if there appears to be mergestate. This is a rough proxy
220 220 for "is a merge in progress."
221 221 """
222 222 return bool(self._local) or bool(self._state)
223 223
224 224 def commit(self):
225 225 """Write current state on disk (if necessary)"""
226 226
227 227 @staticmethod
228 228 def getlocalkey(path):
229 229 """hash the path of a local file context for storage in the .hg/merge
230 230 directory."""
231 231
232 232 return hex(hashutil.sha1(path).digest())
233 233
234 234 def _make_backup(self, fctx, localkey):
235 235 raise NotImplementedError()
236 236
237 237 def _restore_backup(self, fctx, localkey, flags):
238 238 raise NotImplementedError()
239 239
240 240 def add(self, fcl, fco, fca, fd):
241 241 """add a new (potentially?) conflicting file to the merge state
242 242 fcl: file context for local,
243 243 fco: file context for remote,
244 244 fca: file context for ancestors,
245 245 fd: file path of the resulting merge.
246 246
247 247 note: also write the local version to the `.hg/merge` directory.
248 248 """
249 249 if fcl.isabsent():
250 250 localkey = self._repo.nodeconstants.nullhex
251 251 else:
252 252 localkey = mergestate.getlocalkey(fcl.path())
253 253 self._make_backup(fcl, localkey)
254 254 self._state[fd] = [
255 255 MERGE_RECORD_UNRESOLVED,
256 256 localkey,
257 257 fcl.path(),
258 258 fca.path(),
259 259 hex(fca.filenode()),
260 260 fco.path(),
261 261 hex(fco.filenode()),
262 262 fcl.flags(),
263 263 ]
264 264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
265 265 self._dirty = True
266 266
267 267 def addpathconflict(self, path, frename, forigin):
268 268 """add a new conflicting path to the merge state
269 269 path: the path that conflicts
270 270 frename: the filename the conflicting file was renamed to
271 271 forigin: origin of the file ('l' or 'r' for local/remote)
272 272 """
273 273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
274 274 self._dirty = True
275 275
276 276 def addcommitinfo(self, path, data):
277 277 """stores information which is required at commit
278 278 into _stateextras"""
279 279 self._stateextras[path].update(data)
280 280 self._dirty = True
281 281
282 282 def __contains__(self, dfile):
283 283 return dfile in self._state
284 284
285 285 def __getitem__(self, dfile):
286 286 return self._state[dfile][0]
287 287
288 288 def __iter__(self):
289 289 return iter(sorted(self._state))
290 290
291 291 def files(self):
292 292 return self._state.keys()
293 293
294 294 def mark(self, dfile, state):
295 295 self._state[dfile][0] = state
296 296 self._dirty = True
297 297
298 298 def unresolved(self):
299 299 """Obtain the paths of unresolved files."""
300 300
301 301 for f, entry in pycompat.iteritems(self._state):
302 302 if entry[0] in (
303 303 MERGE_RECORD_UNRESOLVED,
304 304 MERGE_RECORD_UNRESOLVED_PATH,
305 305 ):
306 306 yield f
307 307
308 308 def allextras(self):
309 309 """return all extras information stored with the mergestate"""
310 310 return self._stateextras
311 311
312 312 def extras(self, filename):
313 313 """return extras stored with the mergestate for the given filename"""
314 314 return self._stateextras[filename]
315 315
316 316 def _resolve(self, preresolve, dfile, wctx):
317 317 """rerun merge process for file path `dfile`.
318 318 Returns whether the merge was completed and the return value of merge
319 319 obtained from filemerge._filemerge().
320 320 """
321 321 if self[dfile] in (
322 322 MERGE_RECORD_RESOLVED,
323 323 LEGACY_RECORD_DRIVER_RESOLVED,
324 324 ):
325 325 return True, 0
326 326 stateentry = self._state[dfile]
327 327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
328 328 octx = self._repo[self._other]
329 329 extras = self.extras(dfile)
330 330 anccommitnode = extras.get(b'ancestorlinknode')
331 331 if anccommitnode:
332 332 actx = self._repo[anccommitnode]
333 333 else:
334 334 actx = None
335 335 fcd = _filectxorabsent(localkey, wctx, dfile)
336 336 fco = _filectxorabsent(onode, octx, ofile)
337 337 # TODO: move this to filectxorabsent
338 338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
339 339 # "premerge" x flags
340 340 flo = fco.flags()
341 341 fla = fca.flags()
342 342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
343 343 if fca.rev() == nullrev and flags != flo:
344 344 if preresolve:
345 345 self._repo.ui.warn(
346 346 _(
347 347 b'warning: cannot merge flags for %s '
348 348 b'without common ancestor - keeping local flags\n'
349 349 )
350 350 % afile
351 351 )
352 352 elif flags == fla:
353 353 flags = flo
354 354 if preresolve:
355 355 # restore local
356 356 if localkey != self._repo.nodeconstants.nullhex:
357 357 self._restore_backup(wctx[dfile], localkey, flags)
358 358 else:
359 359 wctx[dfile].remove(ignoremissing=True)
360 360 complete, merge_ret, deleted = filemerge.premerge(
361 361 self._repo,
362 362 wctx,
363 363 self._local,
364 364 lfile,
365 365 fcd,
366 366 fco,
367 367 fca,
368 368 labels=self._labels,
369 369 )
370 370 else:
371 371 complete, merge_ret, deleted = filemerge.filemerge(
372 372 self._repo,
373 373 wctx,
374 374 self._local,
375 375 lfile,
376 376 fcd,
377 377 fco,
378 378 fca,
379 379 labels=self._labels,
380 380 )
381 381 if merge_ret is None:
382 382 # If the return value of merge is None, then there is no real conflict
383 383 del self._state[dfile]
384 384 self._dirty = True
385 385 elif not merge_ret:
386 386 self.mark(dfile, MERGE_RECORD_RESOLVED)
387 387
388 388 if complete:
389 389 action = None
390 390 if deleted:
391 391 if fcd.isabsent():
392 392 # dc: local picked. Need to drop if present, which may
393 393 # happen on re-resolves.
394 394 action = ACTION_FORGET
395 395 else:
396 396 # cd: remote picked (or otherwise deleted)
397 397 action = ACTION_REMOVE
398 398 else:
399 399 if fcd.isabsent(): # dc: remote picked
400 400 action = ACTION_GET
401 401 elif fco.isabsent(): # cd: local picked
402 402 if dfile in self.localctx:
403 403 action = ACTION_ADD_MODIFIED
404 404 else:
405 405 action = ACTION_ADD
406 406 # else: regular merges (no action necessary)
407 407 self._results[dfile] = merge_ret, action
408 408
409 409 return complete, merge_ret
410 410
411 411 def preresolve(self, dfile, wctx):
412 412 """run premerge process for dfile
413 413
414 414 Returns whether the merge is complete, and the exit code."""
415 415 return self._resolve(True, dfile, wctx)
416 416
417 417 def resolve(self, dfile, wctx):
418 418 """run merge process (assuming premerge was run) for dfile
419 419
420 420 Returns the exit code of the merge."""
421 421 return self._resolve(False, dfile, wctx)[1]
422 422
423 423 def counts(self):
424 424 """return counts for updated, merged and removed files in this
425 425 session"""
426 426 updated, merged, removed = 0, 0, 0
427 427 for r, action in pycompat.itervalues(self._results):
428 428 if r is None:
429 429 updated += 1
430 430 elif r == 0:
431 431 if action == ACTION_REMOVE:
432 432 removed += 1
433 433 else:
434 434 merged += 1
435 435 return updated, merged, removed
436 436
437 437 def unresolvedcount(self):
438 438 """get unresolved count for this merge (persistent)"""
439 439 return len(list(self.unresolved()))
440 440
441 441 def actions(self):
442 442 """return lists of actions to perform on the dirstate"""
443 443 actions = {
444 444 ACTION_REMOVE: [],
445 445 ACTION_FORGET: [],
446 446 ACTION_ADD: [],
447 447 ACTION_ADD_MODIFIED: [],
448 448 ACTION_GET: [],
449 449 }
450 450 for f, (r, action) in pycompat.iteritems(self._results):
451 451 if action is not None:
452 452 actions[action].append((f, None, b"merge result"))
453 453 return actions
454 454
455 455
456 456 class mergestate(_mergestate_base):
457 457
458 458 statepathv1 = b'merge/state'
459 459 statepathv2 = b'merge/state2'
460 460
461 461 @staticmethod
462 462 def clean(repo):
463 463 """Initialize a brand new merge state, removing any existing state on
464 464 disk."""
465 465 ms = mergestate(repo)
466 466 ms.reset()
467 467 return ms
468 468
469 469 @staticmethod
470 470 def read(repo):
471 471 """Initialize the merge state, reading it from disk."""
472 472 ms = mergestate(repo)
473 473 ms._read()
474 474 return ms
475 475
476 476 def _read(self):
477 477 """Analyse each record's content to restore a serialized state from disk
478 478
479 479 This function processes "record" entries produced by the de-serialization
480 480 of the on-disk file.
481 481 """
482 482 unsupported = set()
483 483 records = self._readrecords()
484 484 for rtype, record in records:
485 485 if rtype == RECORD_LOCAL:
486 486 self._local = bin(record)
487 487 elif rtype == RECORD_OTHER:
488 488 self._other = bin(record)
489 489 elif rtype == LEGACY_MERGE_DRIVER_STATE:
490 490 pass
491 491 elif rtype in (
492 492 RECORD_MERGED,
493 493 RECORD_CHANGEDELETE_CONFLICT,
494 494 RECORD_PATH_CONFLICT,
495 495 LEGACY_MERGE_DRIVER_MERGE,
496 496 LEGACY_RECORD_RESOLVED_OTHER,
497 497 ):
498 498 bits = record.split(b'\0')
499 499 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
500 500 # and we now store related information in _stateextras, so
501 501 # lets write to _stateextras directly
502 502 if bits[1] == MERGE_RECORD_MERGED_OTHER:
503 503 self._stateextras[bits[0]][b'filenode-source'] = b'other'
504 504 else:
505 505 self._state[bits[0]] = bits[1:]
506 506 elif rtype == RECORD_FILE_VALUES:
507 507 filename, rawextras = record.split(b'\0', 1)
508 508 extraparts = rawextras.split(b'\0')
509 509 extras = {}
510 510 i = 0
511 511 while i < len(extraparts):
512 512 extras[extraparts[i]] = extraparts[i + 1]
513 513 i += 2
514 514
515 515 self._stateextras[filename] = extras
516 516 elif rtype == RECORD_LABELS:
517 517 labels = record.split(b'\0', 2)
518 518 self._labels = [l for l in labels if len(l) > 0]
519 519 elif not rtype.islower():
520 520 unsupported.add(rtype)
521 521
522 522 if unsupported:
523 523 raise error.UnsupportedMergeRecords(unsupported)
524 524
525 525 def _readrecords(self):
526 526 """Read merge state from disk and return a list of record (TYPE, data)
527 527
528 528 We read data from both v1 and v2 files and decide which one to use.
529 529
530 530 V1 has been used by versions prior to 2.9.1 and contains less data than
531 531 v2. We read both versions and check if no data in v2 contradicts
532 532 v1. If there is no contradiction we can safely assume that both v1
533 533 and v2 were written at the same time and use the extra data in v2. If
534 534 there is a contradiction we ignore v2 content as we assume an old version
535 535 of Mercurial has overwritten the mergestate file and left an old v2
536 536 file around.
537 537
538 538 returns list of record [(TYPE, data), ...]"""
539 539 v1records = self._readrecordsv1()
540 540 v2records = self._readrecordsv2()
541 541 if self._v1v2match(v1records, v2records):
542 542 return v2records
543 543 else:
544 544 # v1 file is newer than v2 file, use it
545 545 # we have to infer the "other" changeset of the merge
546 546 # we cannot do better than that with v1 of the format
547 547 mctx = self._repo[None].parents()[-1]
548 548 v1records.append((RECORD_OTHER, mctx.hex()))
549 549 # add placeholder "other" file node information
550 550 # nobody is using it yet so we do not need to fetch the data
551 551 # if mctx was wrong `mctx[bits[-2]]` may fail.
552 552 for idx, r in enumerate(v1records):
553 553 if r[0] == RECORD_MERGED:
554 554 bits = r[1].split(b'\0')
555 555 bits.insert(-2, b'')
556 556 v1records[idx] = (r[0], b'\0'.join(bits))
557 557 return v1records
558 558
559 559 def _v1v2match(self, v1records, v2records):
560 560 oldv2 = set() # old format version of v2 record
561 561 for rec in v2records:
562 562 if rec[0] == RECORD_LOCAL:
563 563 oldv2.add(rec)
564 564 elif rec[0] == RECORD_MERGED:
565 565 # drop the onode data (not contained in v1)
566 566 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
567 567 for rec in v1records:
568 568 if rec not in oldv2:
569 569 return False
570 570 else:
571 571 return True
572 572
573 573 def _readrecordsv1(self):
574 574 """read on disk merge state for version 1 file
575 575
576 576 returns list of record [(TYPE, data), ...]
577 577
578 578 Note: the "F" data from this file are one entry short
579 579 (no "other file node" entry)
580 580 """
581 581 records = []
582 582 try:
583 583 f = self._repo.vfs(self.statepathv1)
584 584 for i, l in enumerate(f):
585 585 if i == 0:
586 586 records.append((RECORD_LOCAL, l[:-1]))
587 587 else:
588 588 records.append((RECORD_MERGED, l[:-1]))
589 589 f.close()
590 590 except IOError as err:
591 591 if err.errno != errno.ENOENT:
592 592 raise
593 593 return records
594 594
595 595 def _readrecordsv2(self):
596 596 """read on disk merge state for version 2 file
597 597
598 598 This format is a list of arbitrary records of the form:
599 599
600 600 [type][length][content]
601 601
602 602 `type` is a single character, `length` is a 4 byte integer, and
603 603 `content` is an arbitrary byte sequence of length `length`.
604 604
605 605 Mercurial versions prior to 3.7 have a bug where if there are
606 606 unsupported mandatory merge records, attempting to clear out the merge
607 607 state with hg update --clean or similar aborts. The 't' record type
608 608 works around that by writing out what those versions treat as an
609 609 advisory record, but later versions interpret as special: the first
610 610 character is the 'real' record type and everything onwards is the data.
611 611
612 612 Returns list of records [(TYPE, data), ...]."""
613 613 records = []
614 614 try:
615 615 f = self._repo.vfs(self.statepathv2)
616 616 data = f.read()
617 617 off = 0
618 618 end = len(data)
619 619 while off < end:
620 620 rtype = data[off : off + 1]
621 621 off += 1
622 622 length = _unpack(b'>I', data[off : (off + 4)])[0]
623 623 off += 4
624 624 record = data[off : (off + length)]
625 625 off += length
626 626 if rtype == RECORD_OVERRIDE:
627 627 rtype, record = record[0:1], record[1:]
628 628 records.append((rtype, record))
629 629 f.close()
630 630 except IOError as err:
631 631 if err.errno != errno.ENOENT:
632 632 raise
633 633 return records
634 634
635 635 def commit(self):
636 636 if self._dirty:
637 637 records = self._makerecords()
638 638 self._writerecords(records)
639 639 self._dirty = False
640 640
641 641 def _makerecords(self):
642 642 records = []
643 643 records.append((RECORD_LOCAL, hex(self._local)))
644 644 records.append((RECORD_OTHER, hex(self._other)))
645 645 # Write out state items. In all cases, the value of the state map entry
646 646 # is written as the contents of the record. The record type depends on
647 647 # the type of state that is stored, and capital-letter records are used
648 648 # to prevent older versions of Mercurial that do not support the feature
649 649 # from loading them.
650 650 for filename, v in pycompat.iteritems(self._state):
651 651 if v[0] in (
652 652 MERGE_RECORD_UNRESOLVED_PATH,
653 653 MERGE_RECORD_RESOLVED_PATH,
654 654 ):
655 655 # Path conflicts. These are stored in 'P' records. The current
656 656 # resolution state ('pu' or 'pr') is stored within the record.
657 657 records.append(
658 658 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
659 659 )
660 660 elif (
661 661 v[1] == self._repo.nodeconstants.nullhex
662 662 or v[6] == self._repo.nodeconstants.nullhex
663 663 ):
664 664 # Change/Delete or Delete/Change conflicts. These are stored in
665 665 # 'C' records. v[1] is the local file, and is nullhex when the
666 666 # file is deleted locally ('dc'). v[6] is the remote file, and
667 667 # is nullhex when the file is deleted remotely ('cd').
668 668 records.append(
669 669 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
670 670 )
671 671 else:
672 672 # Normal files. These are stored in 'F' records.
673 673 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
674 674 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
675 675 rawextras = b'\0'.join(
676 676 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
677 677 )
678 678 records.append(
679 679 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
680 680 )
681 681 if self._labels is not None:
682 682 labels = b'\0'.join(self._labels)
683 683 records.append((RECORD_LABELS, labels))
684 684 return records
685 685
686 686 def _writerecords(self, records):
687 687 """Write current state on disk (both v1 and v2)"""
688 688 self._writerecordsv1(records)
689 689 self._writerecordsv2(records)
690 690
691 691 def _writerecordsv1(self, records):
692 692 """Write current state on disk in a version 1 file"""
693 693 f = self._repo.vfs(self.statepathv1, b'wb')
694 694 irecords = iter(records)
695 695 lrecords = next(irecords)
696 696 assert lrecords[0] == RECORD_LOCAL
697 697 f.write(hex(self._local) + b'\n')
698 698 for rtype, data in irecords:
699 699 if rtype == RECORD_MERGED:
700 700 f.write(b'%s\n' % _droponode(data))
701 701 f.close()
702 702
703 703 def _writerecordsv2(self, records):
704 704 """Write current state on disk in a version 2 file
705 705
706 706 See the docstring for _readrecordsv2 for why we use 't'."""
707 707 # these are the records that all version 2 clients can read
708 708 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
709 709 f = self._repo.vfs(self.statepathv2, b'wb')
710 710 for key, data in records:
711 711 assert len(key) == 1
712 712 if key not in allowlist:
713 713 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
714 714 format = b'>sI%is' % len(data)
715 715 f.write(_pack(format, key, len(data), data))
716 716 f.close()
717 717
718 718 def _make_backup(self, fctx, localkey):
719 719 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
720 720
721 721 def _restore_backup(self, fctx, localkey, flags):
722 722 with self._repo.vfs(b'merge/' + localkey) as f:
723 723 fctx.write(f.read(), flags)
724 724
725 725 def reset(self):
726 726 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
727 727
728 728
729 729 class memmergestate(_mergestate_base):
730 730 def __init__(self, repo):
731 731 super(memmergestate, self).__init__(repo)
732 732 self._backups = {}
733 733
734 734 def _make_backup(self, fctx, localkey):
735 735 self._backups[localkey] = fctx.data()
736 736
737 737 def _restore_backup(self, fctx, localkey, flags):
738 738 fctx.write(self._backups[localkey], flags)
739 739
740 740
741 741 def recordupdates(repo, actions, branchmerge, getfiledata):
742 742 """record merge actions to the dirstate"""
743 743 # remove (must come first)
744 744 for f, args, msg in actions.get(ACTION_REMOVE, []):
745 745 if branchmerge:
746 746 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
747 747 else:
748 748 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
749 749
750 750 # forget (must come first)
751 751 for f, args, msg in actions.get(ACTION_FORGET, []):
752 752 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
753 753
754 754 # resolve path conflicts
755 755 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
756 756 (f0, origf0) = args
757 757 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
758 758 repo.dirstate.copy(origf0, f)
759 759 if f0 == origf0:
760 760 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
761 761 else:
762 762 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
763 763
764 764 # re-add
765 765 for f, args, msg in actions.get(ACTION_ADD, []):
766 766 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
767 767
768 768 # re-add/mark as modified
769 769 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
770 770 if branchmerge:
771 771 repo.dirstate.update_file(
772 772 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
773 773 )
774 774 else:
775 775 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
776 776
777 777 # exec change
778 778 for f, args, msg in actions.get(ACTION_EXEC, []):
779 779 repo.dirstate.update_file(
780 780 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
781 781 )
782 782
783 783 # keep
784 784 for f, args, msg in actions.get(ACTION_KEEP, []):
785 785 pass
786 786
787 787 # keep deleted
788 788 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
789 789 pass
790 790
791 791 # keep new
792 792 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
793 793 pass
794 794
795 795 # get
796 796 for f, args, msg in actions.get(ACTION_GET, []):
797 797 if branchmerge:
798 798 # tracked in p1 can also be True, but update_file should not care
799 old_entry = repo.dirstate.get_entry(f)
800 p1_tracked = old_entry.any_tracked and not old_entry.added
799 801 repo.dirstate.update_file(
800 802 f,
801 p1_tracked=False,
803 p1_tracked=p1_tracked,
802 804 p2_tracked=True,
803 805 wc_tracked=True,
804 clean_p2=True,
806 clean_p2=not p1_tracked,
807 merged=p1_tracked,
805 808 )
806 809 else:
807 810 parentfiledata = getfiledata[f] if getfiledata else None
808 811 repo.dirstate.update_file(
809 812 f,
810 813 p1_tracked=True,
811 814 wc_tracked=True,
812 815 parentfiledata=parentfiledata,
813 816 )
814 817
815 818 # merge
816 819 for f, args, msg in actions.get(ACTION_MERGE, []):
817 820 f1, f2, fa, move, anc = args
818 821 if branchmerge:
819 822 # We've done a branch merge, mark this file as merged
820 823 # so that we properly record the merger later
821 824 repo.dirstate.update_file(
822 825 f, p1_tracked=True, wc_tracked=True, merged=True
823 826 )
824 827 if f1 != f2: # copy/rename
825 828 if move:
826 829 repo.dirstate.update_file(
827 830 f1, p1_tracked=True, wc_tracked=False
828 831 )
829 832 if f1 != f:
830 833 repo.dirstate.copy(f1, f)
831 834 else:
832 835 repo.dirstate.copy(f2, f)
833 836 else:
834 837 # We've update-merged a locally modified file, so
835 838 # we set the dirstate to emulate a normal checkout
836 839 # of that file some time in the past. Thus our
837 840 # merge will appear as a normal local file
838 841 # modification.
839 842 if f2 == f: # file not locally copied/moved
840 843 repo.dirstate.update_file(
841 844 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
842 845 )
843 846 if move:
844 847 repo.dirstate.update_file(
845 848 f1, p1_tracked=False, wc_tracked=False
846 849 )
847 850
848 851 # directory rename, move local
849 852 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
850 853 f0, flag = args
851 854 if branchmerge:
852 855 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
853 856 repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
854 857 repo.dirstate.copy(f0, f)
855 858 else:
856 859 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
857 860 repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
858 861
859 862 # directory rename, get
860 863 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
861 864 f0, flag = args
862 865 if branchmerge:
863 866 repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
864 867 repo.dirstate.copy(f0, f)
865 868 else:
866 869 repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
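For reference, the v2 merge-state file described in `_readrecordsv2` and `_writerecordsv2` above is a flat sequence of `[type][length][content]` records. A standalone sketch of that encoding follows; the helper names are illustrative and not part of the Mercurial API:

    import struct

    def pack_record(rtype, data):
        # matches _writerecordsv2's b'>sI%is' packing: 1-byte type, 4-byte length, payload
        return struct.pack(b'>sI%is' % len(data), rtype, len(data), data)

    def unpack_first_record(buf):
        rtype = buf[0:1]
        (length,) = struct.unpack(b'>I', buf[1:5])
        return rtype, buf[5:5 + length]

    rec = pack_record(b'L', b'0123456789abcdef' * 2 + b'01234567')  # 40-byte hex node
    assert unpack_first_record(rec) == (b'L', b'0123456789abcdef' * 2 + b'01234567')

Record types outside the v2 allowlist are wrapped in the advisory 't' (RECORD_OVERRIDE) record, as described in the `_readrecordsv2` docstring, so that older clients can skip them instead of aborting.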