# Imported changeset metadata:
#   dirstate: include explicit matches in match.traversedir calls
#   Author: Martin von Zweigbergk
#   Revision: r44112:95d2eab0 (default branch)
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import delattr
19 19
20 20 from hgdemandimport import tracing
21 21
22 22 from . import (
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 txnutil,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 propertycache = util.propertycache
43 43 filecache = scmutil.filecache
44 44 _rangemask = 0x7FFFFFFF
45 45
46 46 dirstatetuple = parsers.dirstatetuple
47 47
48 48
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve the cached file's path relative to the .hg directory
        # (obj._opener is the .hg vfs).
        return obj._opener.join(fname)
55 55
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve the cached file's path relative to the repository root.
        return obj._join(fname)
62 62
63 63 def _getfsnow(vfs):
64 64 '''Get "now" timestamp on filesystem'''
65 65 tmpfd, tmpname = vfs.mkstemp()
66 66 try:
67 67 return os.fstat(tmpfd)[stat.ST_MTIME]
68 68 finally:
69 69 os.close(tmpfd)
70 70 vfs.unlink(tmpname)
71 71
72 72
73 73 @interfaceutil.implementer(intdirstate.idirstate)
74 74 class dirstate(object):
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # count of currently open parentchange() contexts; setparents()
        # refuses to run when this is zero
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
104 104
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        # The counter (rather than a flag) allows nested parentchange()
        # contexts.
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # The self-assignment replaces the propertycache access with the
        # computed map for all subsequent lookups.
        self._map = self._mapcls(self._ui, self._opener, self._root)
        return self._map
133 133
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
146 146
147 147 @repocache(b'branch')
148 148 def _branch(self):
149 149 try:
150 150 return self._opener.read(b"branch").strip() or b"default"
151 151 except IOError as inst:
152 152 if inst.errno != errno.ENOENT:
153 153 raise
154 154 return b"default"
155 155
    @property
    def _pl(self):
        # (p1, p2) nodeids of the working directory parents, straight
        # from the dirstate map.
        return self._map.parents()

    def hasdir(self, d):
        # True if directory d contains at least one tracked entry.
        return self._map.hastrackeddir(d)
162 162
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from all configured ignore files."""
        files = self._ignorefiles()
        if not files:
            # no ignore files at all: nothing is ignored
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
171 171
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the
        # platform separator differs (ui.slash config).
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root records the exec bit
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        # True on case-insensitive (case-folding) filesystems
        return not util.fscasesensitive(self._join(b'.hg'))
187 187
    def _join(self, f):
        """Return the absolute filesystem path of tracked path f."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
192 192
    def flagfunc(self, buildfallback):
        # Return a function mapping a tracked path to its flags: b'l' for
        # a symlink, b'x' for an executable file, b'' otherwise.  Any
        # capability the filesystem lacks (symlinks and/or exec bits) is
        # supplied by the fallback built lazily via buildfallback().
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing file: no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # filesystem supports neither: everything comes from fallback
            return fallback
232 232
    @propertycache
    def _cwd(self):
        """Cached current working directory (overridable for tests)."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
240 240
    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            # at the repo root: canonical paths are relative to b''
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: strip the root prefix
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
260 260
261 261 def pathto(self, f, cwd=None):
262 262 if cwd is None:
263 263 cwd = self.getcwd()
264 264 path = util.pathto(self._root, cwd, f)
265 265 if self._slash:
266 266 return util.pconvert(path)
267 267 return path
268 268
269 269 def __getitem__(self, key):
270 270 '''Return the current state of key (a filename) in the dirstate.
271 271
272 272 States are:
273 273 n normal
274 274 m needs merging
275 275 r marked for removal
276 276 a marked for addition
277 277 ? not tracked
278 278 '''
279 279 return self._map.get(key, (b"?",))[0]
280 280
    def __contains__(self, key):
        # True if filename `key` has a dirstate entry
        return key in self._map

    def __iter__(self):
        # iterate tracked filenames in sorted order
        return iter(sorted(self._map))

    def items(self):
        # iterate (filename, entry) pairs in map order
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias
    iteritems = items
291 291
    def parents(self):
        # both working directory parents, run through the node validator
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # first working directory parent
        return self._validate(self._pl[0])

    def p2(self):
        # second working directory parent (nullid outside of merges)
        return self._validate(self._pl[1])

    def branch(self):
        # current branch name, converted to the local encoding
        return encoding.tolocal(self._branch)
303 303
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the original parents for the change callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: clean up merge/otherparent markers
            candidatefiles = self._map.nonnormalset.union(
                self._map.otherparentset
            )
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == b'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == b'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
347 347
    def setbranch(self, branch):
        """Persist the working directory branch name to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
363 363
364 364 def invalidate(self):
365 365 '''Causes the next access to reread the dirstate.
366 366
367 367 This is different from localrepo.invalidatedirstate() because it always
368 368 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
369 369 check whether the dirstate has changed before rereading it.'''
370 370
371 371 for a in ("_map", "_branch", "_ignore"):
372 372 if a in self.__dict__:
373 373 delattr(self, a)
374 374 self._lastnormaltime = 0
375 375 self._dirty = False
376 376 self._updatedfiles.clear()
377 377 self._parentwriters = 0
378 378 self._origpl = None
379 379
380 380 def copy(self, source, dest):
381 381 """Mark dest as a copy of source. Unmark dest if source is None."""
382 382 if source == dest:
383 383 return
384 384 self._dirty = True
385 385 if source is not None:
386 386 self._map.copymap[dest] = source
387 387 self._updatedfiles.add(source)
388 388 self._updatedfiles.add(dest)
389 389 elif self._map.copymap.pop(dest, None):
390 390 self._updatedfiles.add(dest)
391 391
    def copied(self, file):
        """Return the copy source of file, or None if not a copy."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full destination -> source copy mapping."""
        return self._map.copymap
397 397
    def _addpath(self, f, state, mode, size, mtime):
        """Insert/update the entry for f, guarding against clashes between
        files and directories already present in the dirstate."""
        oldstate = self[f]
        if state == b'a' or oldstate == b'r':
            # only newly-added (or resurrected) names need validation
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(
                    _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                )
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != b'r':
                    raise error.Abort(
                        _(b'file %r in dirstate clashes with %r')
                        % (pycompat.bytestr(d), pycompat.bytestr(f))
                    )
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)
419 419
    def normal(self, f, parentfiledata=None):
        '''Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now.'''
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no caller-supplied data: stat the file ourselves
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        # size/mtime are masked to 31 bits for storage (_rangemask)
        self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
446 446
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == b'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                    # already in a merge/otherparent state: leave untouched
                    return
        # size -1 / mtime -1 record the entry as "possibly dirty"
        self._addpath(f, b'n', 0, -1, -1)
        self._map.copymap.pop(f, None)
468 468
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(
                _(b"setting %r to other parent only allowed in merges") % f
            )
        if f in self and self[f] == b'n':
            # merge-like: already tracked as normal, record as merged (-2)
            self._addpath(f, b'm', 0, -2, -1)
        else:
            # add-like: record as normal but from the other parent (-2)
            self._addpath(f, b'n', 0, -2, -1)
        self._map.copymap.pop(f, None)
482 482
    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, b'a', 0, -1, -1)
        # discard any stale copy record for the freshly added name
        self._map.copymap.pop(f, None)
487 487
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != nullid:
            # during a merge, encode the pre-removal state in the size
            # field so normallookup() can restore it later
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state
                if entry[0] == b'm': # merge
                    size = -1
                elif entry[0] == b'n' and entry[2] == -2: # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            # plain removal: no merge state to preserve, drop copy info
            self._map.copymap.pop(f, None)
506 506
507 507 def merge(self, f):
508 508 '''Mark a file merged.'''
509 509 if self._pl[1] == nullid:
510 510 return self.normallookup(f)
511 511 return self.otherparent(f)
512 512
513 513 def drop(self, f):
514 514 '''Drop a file from the dirstate'''
515 515 oldstate = self[f]
516 516 if self._map.dropfile(f, oldstate):
517 517 self._dirty = True
518 518 self._updatedfiles.add(f)
519 519 self._map.copymap.pop(f, None)
520 520
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the canonical-case ("folded") form of path by
        consulting the filesystem, caching the result in storemap.
        'normed' is the case-normalized form of path; 'exists' may be
        passed when the caller already knows whether path exists."""
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
546 546
547 547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
548 548 normed = util.normcase(path)
549 549 folded = self._map.filefoldmap.get(normed, None)
550 550 if folded is None:
551 551 if isknown:
552 552 folded = path
553 553 else:
554 554 folded = self._discoverpath(
555 555 path, normed, ignoremissing, exists, self._map.filefoldmap
556 556 )
557 557 return folded
558 558
559 559 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
560 560 normed = util.normcase(path)
561 561 folded = self._map.filefoldmap.get(normed, None)
562 562 if folded is None:
563 563 folded = self._map.dirfoldmap.get(normed, None)
564 564 if folded is None:
565 565 if isknown:
566 566 folded = path
567 567 else:
568 568 # store discovered result in dirfoldmap so that future
569 569 # normalizefile calls don't start matching directories
570 570 folded = self._discoverpath(
571 571 path, normed, ignoremissing, exists, self._map.dirfoldmap
572 572 )
573 573 return folded
574 574
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        # case-sensitive filesystem: nothing to normalize
        return path
596 596
597 597 def clear(self):
598 598 self._map.clear()
599 599 self._lastnormaltime = 0
600 600 self._updatedfiles.clear()
601 601 self._dirty = True
602 602
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate against a single parent revision; only
        entries in changedfiles (default: all) are recomputed."""
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # clear() resets _lastnormaltime; preserve it across the rebuild
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
621 621
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        return self._map.identity
629 629
    def write(self, tr):
        """Write pending changes; deferred to transaction close when a
        transaction tr is given, otherwise written immediately."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
661 661
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
672 672
    def _writedirstate(self, st):
        """Serialize the dirstate map to open file object st, firing
        parent-change callbacks and handling timestamp ambiguity."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e[0] == b'n' and e[3] == now:
                    import time # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
706 706
707 707 def _dirignore(self, f):
708 708 if self._ignore(f):
709 709 return True
710 710 for p in pathutil.finddirs(f):
711 711 if self._ignore(p):
712 712 return True
713 713 return False
714 714
    def _ignorefiles(self):
        """Return the absolute paths of all configured ignore files:
        .hgignore at the repo root plus any ui.ignore* config entries."""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
725 725
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the pattern that ignores
        f, or (None, -1, b"") if f is not ignored.  Follows subinclude
        patterns into their referenced files."""
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file, guarding against cycles
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
747 747
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description for an unwalkable file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used lookups into locals for the loop below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; the subrepo
        # itself handles them (both lists are sorted for the merge scan)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if self._map.hasdir(nf):
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
887 887
888 888 def walk(self, match, subrepos, unknown, ignored, full=True):
889 889 '''
890 890 Walk recursively through the directory tree, finding all files
891 891 matched by match.
892 892
893 893 If full is False, maybe skip some known-clean files.
894 894
895 895 Return a dict mapping filename to stat-like object (either
896 896 mercurial.osutil.stat instance or return value of os.stat()).
897 897
898 898 '''
899 899 # full is a flag that extensions that hook into walk can use -- this
900 900 # implementation doesn't use it at all. This satisfies the contract
901 901 # because we only guarantee a "maybe".
902 902
903 903 if ignored:
904 904 ignore = util.never
905 905 dirignore = util.never
906 906 elif unknown:
907 907 ignore = self._ignore
908 908 dirignore = self._dirignore
909 909 else:
910 910 # if not unknown and not ignored, drop dir recursion and step 2
911 911 ignore = util.always
912 912 dirignore = util.always
913 913
914 914 matchfn = match.matchfn
915 915 matchalways = match.always()
916 916 matchtdir = match.traversedir
917 917 dmap = self._map
918 918 listdir = util.listdir
919 919 lstat = os.lstat
920 920 dirkind = stat.S_IFDIR
921 921 regkind = stat.S_IFREG
922 922 lnkkind = stat.S_IFLNK
923 923 join = self._join
924 924
925 925 exact = skipstep3 = False
926 926 if match.isexact(): # match.exact
927 927 exact = True
928 928 dirignore = util.always # skip step 2
929 929 elif match.prefix(): # match.match, no patterns
930 930 skipstep3 = True
931 931
932 932 if not exact and self._checkcase:
933 933 normalize = self._normalize
934 934 normalizefile = self._normalizefile
935 935 skipstep3 = False
936 936 else:
937 937 normalize = self._normalize
938 938 normalizefile = None
939 939
940 940 # step 1: find all explicit files
941 941 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
942 if matchtdir:
943 for d in work:
944 matchtdir(d[0])
945 for d in dirsnotfound:
946 matchtdir(d)
942 947
943 948 skipstep3 = skipstep3 and not (work or dirsnotfound)
944 949 work = [d for d in work if not dirignore(d[0])]
945 950
946 951 # step 2: visit subdirectories
947 952 def traverse(work, alreadynormed):
948 953 wadd = work.append
949 954 while work:
950 955 tracing.counter('dirstate.walk work', len(work))
951 956 nd = work.pop()
952 957 visitentries = match.visitchildrenset(nd)
953 958 if not visitentries:
954 959 continue
955 960 if visitentries == b'this' or visitentries == b'all':
956 961 visitentries = None
957 962 skip = None
958 963 if nd != b'':
959 964 skip = b'.hg'
960 965 try:
961 966 with tracing.log('dirstate.walk.traverse listdir %s', nd):
962 967 entries = listdir(join(nd), stat=True, skip=skip)
963 968 except OSError as inst:
964 969 if inst.errno in (errno.EACCES, errno.ENOENT):
965 970 match.bad(
966 971 self.pathto(nd), encoding.strtolocal(inst.strerror)
967 972 )
968 973 continue
969 974 raise
970 975 for f, kind, st in entries:
971 976 # Some matchers may return files in the visitentries set,
972 977 # instead of 'this', if the matcher explicitly mentions them
973 978 # and is not an exactmatcher. This is acceptable; we do not
974 979 # make any hard assumptions about file-or-directory below
975 980 # based on the presence of `f` in visitentries. If
976 981 # visitchildrenset returned a set, we can always skip the
977 982 # entries *not* in the set it provided regardless of whether
978 983 # they're actually a file or a directory.
979 984 if visitentries and f not in visitentries:
980 985 continue
981 986 if normalizefile:
982 987 # even though f might be a directory, we're only
983 988 # interested in comparing it to files currently in the
984 989 # dmap -- therefore normalizefile is enough
985 990 nf = normalizefile(
986 991 nd and (nd + b"/" + f) or f, True, True
987 992 )
988 993 else:
989 994 nf = nd and (nd + b"/" + f) or f
990 995 if nf not in results:
991 996 if kind == dirkind:
992 997 if not ignore(nf):
993 998 if matchtdir:
994 999 matchtdir(nf)
995 1000 wadd(nf)
996 1001 if nf in dmap and (matchalways or matchfn(nf)):
997 1002 results[nf] = None
998 1003 elif kind == regkind or kind == lnkkind:
999 1004 if nf in dmap:
1000 1005 if matchalways or matchfn(nf):
1001 1006 results[nf] = st
1002 1007 elif (matchalways or matchfn(nf)) and not ignore(
1003 1008 nf
1004 1009 ):
1005 1010 # unknown file -- normalize if necessary
1006 1011 if not alreadynormed:
1007 1012 nf = normalize(nf, False, True)
1008 1013 results[nf] = st
1009 1014 elif nf in dmap and (matchalways or matchfn(nf)):
1010 1015 results[nf] = None
1011 1016
1012 1017 for nd, d in work:
1013 1018 # alreadynormed means that processwork doesn't have to do any
1014 1019 # expensive directory normalization
1015 1020 alreadynormed = not normalize or nd == d
1016 1021 traverse([d], alreadynormed)
1017 1022
1018 1023 for s in subrepos:
1019 1024 del results[s]
1020 1025 del results[b'.hg']
1021 1026
1022 1027 # step 3: visit remaining files from dmap
1023 1028 if not skipstep3 and not exact:
1024 1029 # If a dmap file is not in results yet, it was either
1025 1030 # a) not matching matchfn b) ignored, c) missing, or d) under a
1026 1031 # symlink directory.
1027 1032 if not results and matchalways:
1028 1033 visit = [f for f in dmap]
1029 1034 else:
1030 1035 visit = [f for f in dmap if f not in results and matchfn(f)]
1031 1036 visit.sort()
1032 1037
1033 1038 if unknown:
1034 1039 # unknown == True means we walked all dirs under the roots
1035 1040 # that wasn't ignored, and everything that matched was stat'ed
1036 1041 # and is already in results.
1037 1042 # The rest must thus be ignored or under a symlink.
1038 1043 audit_path = pathutil.pathauditor(self._root, cached=True)
1039 1044
1040 1045 for nf in iter(visit):
1041 1046 # If a stat for the same file was already added with a
1042 1047 # different case, don't add one for this, since that would
1043 1048 # make it appear as if the file exists under both names
1044 1049 # on disk.
1045 1050 if (
1046 1051 normalizefile
1047 1052 and normalizefile(nf, True, True) in results
1048 1053 ):
1049 1054 results[nf] = None
1050 1055 # Report ignored items in the dmap as long as they are not
1051 1056 # under a symlink directory.
1052 1057 elif audit_path.check(nf):
1053 1058 try:
1054 1059 results[nf] = lstat(join(nf))
1055 1060 # file was just ignored, no links, and exists
1056 1061 except OSError:
1057 1062 # file doesn't exist
1058 1063 results[nf] = None
1059 1064 else:
1060 1065 # It's either missing or under a symlink directory
1061 1066 # which we in this case report as missing
1062 1067 results[nf] = None
1063 1068 else:
1064 1069 # We may not have walked the full directory tree above,
1065 1070 # so stat and check everything we missed.
1066 1071 iv = iter(visit)
1067 1072 for st in util.statfiles([join(i) for i in visit]):
1068 1073 results[next(iv)] = st
1069 1074 return results
1070 1075
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written

        ``match`` selects which files to consider; ``subrepos`` is a list of
        subrepo paths to exclude from the results; ``ignored``/``clean``/
        ``unknown`` are booleans requesting those categories be listed.
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        # Reuse the parameter names as result accumulators from here on.
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the (faster) Rust implementation can service this
        # request; fall back to the pure-Python walk below otherwise.
        use_rust = True
        if rustmod is None:
            use_rust = False
        elif subrepos:
            use_rust = False
        if bool(listunknown):
            # Pathauditor does not exist yet in Rust, unknown files
            # can't be trusted.
            use_rust = False
        elif self._ignorefiles() and listignored:
            # Rust has no ignore mechanism yet, so don't use Rust for
            # commands that need ignore.
            use_rust = False
        elif not match.always():
            # Matchers have yet to be implemented
            use_rust = False

        if use_rust:
            # Force Rayon (Rust parallelism library) to respect the number of
            # workers. This is a temporary workaround until Rust code knows
            # how to read the config file.
            numcpus = self._ui.configint(b"worker", b"numcpus")
            if numcpus is not None:
                encoding.environ.setdefault(
                    b'RAYON_NUM_THREADS', b'%d' % numcpus
                )

            workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
            if not workers_enabled:
                encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

            (
                lookup,
                modified,
                added,
                removed,
                deleted,
                unknown,
                clean,
            ) = rustmod.status(
                dmap._rustmap,
                self._rootdir,
                bool(listclean),
                self._lastnormaltime,
                self._checkexec,
            )

            status = scmutil.status(
                modified=modified,
                added=added,
                removed=removed,
                deleted=deleted,
                unknown=unknown,
                ignored=ignored,
                clean=clean,
            )
            return (lookup, status)

        def noop(f):
            # placeholder "append" used when a category was not requested
            pass

        # Bind hot attributes/methods to locals: the walk loop below runs
        # once per file and repeated attribute lookups are measurable.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File exists on disk but is not tracked: ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                # Tracked (normal/merged/added) but gone from disk.
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # Same size but mtime changed: content comparison needed.
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)

        return (
            lookup,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
        )
1239 1244
1240 1245 def matches(self, match):
1241 1246 '''
1242 1247 return files in the dirstate (in whatever state) filtered by match
1243 1248 '''
1244 1249 dmap = self._map
1245 1250 if match.always():
1246 1251 return dmap.keys()
1247 1252 files = match.files()
1248 1253 if match.isexact():
1249 1254 # fast path -- filter the other way around, since typically files is
1250 1255 # much smaller than dmap
1251 1256 return [f for f in files if f in dmap]
1252 1257 if match.prefix() and all(fn in dmap for fn in files):
1253 1258 # fast path -- all the values are known to be files, so just return
1254 1259 # that
1255 1260 return list(files)
1256 1261 return [f for f in dmap if match(f)]
1257 1262
1258 1263 def _actualfilename(self, tr):
1259 1264 if tr:
1260 1265 return self._pendingfilename
1261 1266 else:
1262 1267 return self._filename
1263 1268
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction, or None when no transaction is
        running; ``backupname`` is the vfs-relative name to back up into.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # Remove any stale backup first so the hardlink below can't fail on
        # an existing target.
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1301 1306
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Replaces the current (or pending, inside a transaction) dirstate
        file with ``backupname`` and drops all in-memory state.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup and target are already the same file (e.g. the hardlink
            # made by savebackup): just drop the backup name
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)
1313 1318
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        ``tr`` is accepted for interface symmetry with savebackup and
        restorebackup; only ``backupname`` is used here.
        '''
        self._opener.unlink(backupname)
1317 1322
1318 1323
class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        """The filename -> dirstatetuple mapping, loaded lazily on first
        access by parsing the on-disk dirstate file."""
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        """The copy-destination -> copy-source mapping; populated as a side
        effect of reading the dirstate (touching self._map triggers read)."""
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        """Reset to an empty dirstate with null parents and drop every
        cached derived view."""
        self._map.clear()
        self.copymap.clear()
        self.setparents(nullid, nullid)
        util.clearcachedproperty(self, b"_dirs")
        util.clearcachedproperty(self, b"_alldirs")
        util.clearcachedproperty(self, b"filefoldmap")
        util.clearcachedproperty(self, b"dirfoldmap")
        util.clearcachedproperty(self, b"nonnormalset")
        util.clearcachedproperty(self, b"otherparentset")

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate.

        The `__dict__` checks only update the directory caches when they
        have already been computed (propertycache stores them there).
        """
        if oldstate in b"?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != b'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in b"?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        # A removed entry still exists in the dirstate, so a previously
        # unknown file becomes a new entry in the all-directories cache.
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple(b'r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate.  Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != b"r" and "_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if "_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
        """Force a later content check (mtime -1) for any of ``files`` whose
        recorded mtime equals ``now``, to avoid racy same-second changes."""
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == b'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # fast path provided by the C extension, when available
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in pycompat.iteritems(self._map):
                if e[0] != b'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == b'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if s[0] != b'r':
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        # directories containing at least one tracked (non-removed) file
        return pathutil.dirs(self._map, b'r')

    @propertycache
    def _alldirs(self):
        # directories containing any dirstate entry, removed included
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        """Open the dirstate (or pending dirstate) file, refusing to mix
        pending and non-pending reads within one instance."""
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed parallelly')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        """Return the working copy's (p1, p2) pair, reading just the 40-byte
        header of the dirstate file on first use."""
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(40)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            l = len(st)
            if l == 40:
                self._parents = (st[:20], st[20:40])
            elif l == 0:
                self._parents = (nullid, nullid)
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents

    def setparents(self, p1, p2):
        """Record new working copy parents; they are written back on the
        next write()."""
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        """Parse the on-disk dirstate file into self._map and self.copymap."""
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

    def write(self, st, now):
        """Serialize the dirstate to the open file ``st`` using ``now`` as
        the timestamp for entries with ambiguous mtimes."""
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        # cache the companion set computed in the same pass
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        # cache the companion set computed in the same pass
        self.nonnormalset = nonnorm
        return otherparents

    @propertycache
    def identity(self):
        # reading the map (via read()) sets self.identity as a side effect
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        # normalized-case directory name -> name as stored in the dirstate
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f
1652 1657
1653 1658
if rustmod is not None:

    class dirstatemap(object):
        """Dirstate map backed by the Rust ``DirstateMap`` extension.

        Replaces the pure-Python ``dirstatemap`` above when the Rust policy
        module is available, exposing the same interface and delegating
        storage and most operations to ``self._rustmap``.
        """

        def __init__(self, ui, opener, root):
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._parents = None
            self._dirtyparents = False

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

        def addfile(self, *args, **kwargs):
            return self._rustmap.addfile(*args, **kwargs)

        def removefile(self, *args, **kwargs):
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            return self._rustmap.get(*args, **kwargs)

        @propertycache
        def _rustmap(self):
            # Lazily construct the Rust map and populate it from disk.
            self._rustmap = rustmod.DirstateMap(self._root)
            self.read()
            return self._rustmap

        @property
        def copymap(self):
            return self._rustmap.copymap()

        def preload(self):
            """Loads the underlying data, if it's not already loaded"""
            self._rustmap

        def clear(self):
            """Reset to an empty dirstate with null parents and drop the
            cached directory views."""
            self._rustmap.clear()
            self.setparents(nullid, nullid)
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            """Open the dirstate (or pending dirstate) file, refusing to mix
            pending and non-pending reads within one instance."""
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            """Record new working copy parents in both the Rust map and the
            Python-side cache."""
            self._rustmap.setparents(p1, p2)
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            """Return the working copy's (p1, p2) pair, reading just the
            header of the dirstate file on first use."""
            if not self._parents:
                try:
                    fp = self._opendirstatefile()
                    st = fp.read(40)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = b''

                try:
                    self._parents = self._rustmap.parents(st)
                except ValueError:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

            return self._parents

        def read(self):
            """Parse the on-disk dirstate file into the Rust map."""
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            try:
                fp = self._opendirstatefile()
                try:
                    st = fp.read()
                finally:
                    fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                return
            if not st:
                return

            # disable GC while parsing, same rationale as the Python map
            parse_dirstate = util.nogc(self._rustmap.read)
            parents = parse_dirstate(st)
            if parents and not self._dirtyparents:
                self.setparents(*parents)

            # Avoid excess attribute lookups by fast pathing certain checks
            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get

        def write(self, st, now):
            """Serialize the dirstate to the open file ``st`` using ``now``
            as the timestamp for entries with ambiguous mtimes."""
            parents = self.parents()
            st.write(self._rustmap.write(parents[0], parents[1], now))
            st.close()
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hasdir(d)

        @propertycache
        def _dirs(self):
            return self._rustmap.getdirs()

        @propertycache
        def _alldirs(self):
            return self._rustmap.getalldirs()

        @propertycache
        def identity(self):
            # reading the map (via read()) sets self.identity as a side effect
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm, otherparents = self._rustmap.nonnormalentries()
            return nonnorm

        @property
        def otherparentset(self):
            nonnorm, otherparents = self._rustmap.nonnormalentries()
            return otherparents

        @propertycache
        def dirfoldmap(self):
            # normalized-case directory name -> name as stored in the dirstate
            f = {}
            normcase = util.normcase
            for name in self._dirs:
                f[normcase(name)] = name
            return f
@@ -1,2710 +1,2708 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import shutil
13 13 import stat
14 14 import struct
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullhex,
23 23 nullid,
24 24 nullrev,
25 25 )
26 26 from .pycompat import delattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies,
30 30 encoding,
31 31 error,
32 32 filemerge,
33 33 match as matchmod,
34 34 obsutil,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 subrepoutil,
39 39 util,
40 40 worker,
41 41 )
42 42
43 43 _pack = struct.pack
44 44 _unpack = struct.unpack
45 45
46 46
47 47 def _droponode(data):
48 48 # used for compatibility for v1
49 49 bits = data.split(b'\0')
50 50 bits = bits[:-2] + bits[-1:]
51 51 return b'\0'.join(bits)
52 52
53 53
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

# Merge driver run states; see the ``mergestate`` docstring for their
# lifecycle ('u' -> 'm' -> 's').
MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

# Per-file resolution states stored as the first element of each
# ``mergestate._state`` entry.
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

# Action codes used as keys/values in the ``actions`` dicts passed around
# this module (see ``mergestate.actions`` and ``_checkunknownfiles``).
ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'
94 94
95 95
96 96 class mergestate(object):
97 97 '''track 3-way merge state of individual files
98 98
99 99 The merge state is stored on disk when needed. Two files are used: one with
100 100 an old format (version 1), and one with a new format (version 2). Version 2
101 101 stores a superset of the data in version 1, including new kinds of records
102 102 in the future. For more about the new format, see the documentation for
103 103 `_readrecordsv2`.
104 104
105 105 Each record can contain arbitrary content, and has an associated type. This
106 106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 107 versions of Mercurial that don't support it should abort. If `type` is
108 108 lowercase, the record can be safely ignored.
109 109
110 110 Currently known records:
111 111
112 112 L: the node of the "local" part of the merge (hexified version)
113 113 O: the node of the "other" part of the merge (hexified version)
114 114 F: a file to be merged entry
115 115 C: a change/delete or delete/change conflict
116 116 D: a file that the external merge driver will merge internally
117 117 (experimental)
118 118 P: a path conflict (file vs directory)
119 119 m: the external merge driver defined for this merge plus its run state
120 120 (experimental)
121 121 f: a (filename, dictionary) tuple of optional values for a given file
122 122 X: unsupported mandatory record type (used in tests)
123 123 x: unsupported advisory record type (used in tests)
124 124 l: the labels for the parts of the merge.
125 125
126 126 Merge driver run states (experimental):
127 127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 128 to resolve or commit
129 129 m: driver-resolved files marked -- only needs to be run before commit
130 130 s: success/skipped -- does not need to be run any more
131 131
132 132 Merge record states (stored in self._state, indexed by filename):
133 133 u: unresolved conflict
134 134 r: resolved conflict
135 135 pu: unresolved path conflict (file conflicts with directory)
136 136 pr: resolved path conflict
137 137 d: driver-resolved conflict
138 138
139 139 The resolve command transitions between 'u' and 'r' for conflicts and
140 140 'pu' and 'pr' for path conflicts.
141 141 '''
142 142
    # On-disk locations (relative to .hg/) of the v1 and v2 state files.
    statepathv1 = b'merge/state'
    statepathv2 = b'merge/state2'
145 145
146 146 @staticmethod
147 147 def clean(repo, node=None, other=None, labels=None):
148 148 """Initialize a brand new merge state, removing any existing state on
149 149 disk."""
150 150 ms = mergestate(repo)
151 151 ms.reset(node, other, labels)
152 152 return ms
153 153
154 154 @staticmethod
155 155 def read(repo):
156 156 """Initialize the merge state, reading it from disk."""
157 157 ms = mergestate(repo)
158 158 ms._read()
159 159 return ms
160 160
161 161 def __init__(self, repo):
162 162 """Initialize the merge state.
163 163
164 164 Do not use this directly! Instead call read() or clean()."""
165 165 self._repo = repo
166 166 self._dirty = False
167 167 self._labels = None
168 168
    def reset(self, node=None, other=None, labels=None):
        """Reset all in-memory state and wipe the on-disk merge directory.

        ``node``/``other`` become the new local/other nodes when given.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop cached changectx properties so they are recomputed lazily
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        # must be cleared BEFORE consulting self.mergedriver below, which
        # compares it against the configured driver
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        else:
            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
        # remove stashed local file copies; ignore errors (True = ignore)
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
        self._results = {}
        self._dirty = False
189 189
    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on-disk files.  Raises
        ``error.UnsupportedMergeRecords`` if a mandatory (uppercase) record
        type is not recognized.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop cached changectx properties; they depend on _local/_other
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                # payload is "<driver>\0<state>"
                bits = record.split(b'\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED,
                    MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS,
                ):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                RECORD_MERGE_DRIVER_MERGE,
            ):
                # per-file entry: "<filename>\0<field>\0<field>..."
                bits = record.split(b'\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                # "<filename>\0<k1>\0<v1>\0<k2>\0<v2>..." -> extras dict
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory; collect and abort below
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)
253 253
254 254 def _readrecords(self):
255 255 """Read merge state from disk and return a list of record (TYPE, data)
256 256
257 257 We read data from both v1 and v2 files and decide which one to use.
258 258
259 259 V1 has been used by version prior to 2.9.1 and contains less data than
260 260 v2. We read both versions and check if no data in v2 contradicts
261 261 v1. If there is not contradiction we can safely assume that both v1
262 262 and v2 were written at the same time and use the extract data in v2. If
263 263 there is contradiction we ignore v2 content as we assume an old version
264 264 of Mercurial has overwritten the mergestate file and left an old v2
265 265 file around.
266 266
267 267 returns list of record [(TYPE, data), ...]"""
268 268 v1records = self._readrecordsv1()
269 269 v2records = self._readrecordsv2()
270 270 if self._v1v2match(v1records, v2records):
271 271 return v2records
272 272 else:
273 273 # v1 file is newer than v2 file, use it
274 274 # we have to infer the "other" changeset of the merge
275 275 # we cannot do better than that with v1 of the format
276 276 mctx = self._repo[None].parents()[-1]
277 277 v1records.append((RECORD_OTHER, mctx.hex()))
278 278 # add place holder "other" file node information
279 279 # nobody is using it yet so we do no need to fetch the data
280 280 # if mctx was wrong `mctx[bits[-2]]` may fails.
281 281 for idx, r in enumerate(v1records):
282 282 if r[0] == RECORD_MERGED:
283 283 bits = r[1].split(b'\0')
284 284 bits.insert(-2, b'')
285 285 v1records[idx] = (r[0], b'\0'.join(bits))
286 286 return v1records
287 287
288 288 def _v1v2match(self, v1records, v2records):
289 289 oldv2 = set() # old format version of v2 record
290 290 for rec in v2records:
291 291 if rec[0] == RECORD_LOCAL:
292 292 oldv2.add(rec)
293 293 elif rec[0] == RECORD_MERGED:
294 294 # drop the onode data (not contained in v1)
295 295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 296 for rec in v1records:
297 297 if rec not in oldv2:
298 298 return False
299 299 else:
300 300 return True
301 301
302 302 def _readrecordsv1(self):
303 303 """read on disk merge state for version 1 file
304 304
305 305 returns list of record [(TYPE, data), ...]
306 306
307 307 Note: the "F" data from this file are one entry short
308 308 (no "other file node" entry)
309 309 """
310 310 records = []
311 311 try:
312 312 f = self._repo.vfs(self.statepathv1)
313 313 for i, l in enumerate(f):
314 314 if i == 0:
315 315 records.append((RECORD_LOCAL, l[:-1]))
316 316 else:
317 317 records.append((RECORD_MERGED, l[:-1]))
318 318 f.close()
319 319 except IOError as err:
320 320 if err.errno != errno.ENOENT:
321 321 raise
322 322 return records
323 323
324 324 def _readrecordsv2(self):
325 325 """read on disk merge state for version 2 file
326 326
327 327 This format is a list of arbitrary records of the form:
328 328
329 329 [type][length][content]
330 330
331 331 `type` is a single character, `length` is a 4 byte integer, and
332 332 `content` is an arbitrary byte sequence of length `length`.
333 333
334 334 Mercurial versions prior to 3.7 have a bug where if there are
335 335 unsupported mandatory merge records, attempting to clear out the merge
336 336 state with hg update --clean or similar aborts. The 't' record type
337 337 works around that by writing out what those versions treat as an
338 338 advisory record, but later versions interpret as special: the first
339 339 character is the 'real' record type and everything onwards is the data.
340 340
341 341 Returns list of records [(TYPE, data), ...]."""
342 342 records = []
343 343 try:
344 344 f = self._repo.vfs(self.statepathv2)
345 345 data = f.read()
346 346 off = 0
347 347 end = len(data)
348 348 while off < end:
349 349 rtype = data[off : off + 1]
350 350 off += 1
351 351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 352 off += 4
353 353 record = data[off : (off + length)]
354 354 off += length
355 355 if rtype == RECORD_OVERRIDE:
356 356 rtype, record = record[0:1], record[1:]
357 357 records.append((rtype, record))
358 358 f.close()
359 359 except IOError as err:
360 360 if err.errno != errno.ENOENT:
361 361 raise
362 362 return records
363 363
    @util.propertycache
    def mergedriver(self):
        """The configured merge driver, validated against the one recorded
        when this merge started.

        Raises ``error.ConfigError`` if the configuration changed mid-merge.
        """
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config(
            b'experimental', b'mergedriver'
        )
        if (
            self._readmergedriver is not None
            and self._readmergedriver != configmergedriver
        ):
            raise error.ConfigError(
                _(b"merge driver changed since merge started"),
                hint=_(b"revert merge driver change or abort merge"),
            )

        return configmergedriver
387 387
388 388 @util.propertycache
389 389 def localctx(self):
390 390 if self._local is None:
391 391 msg = b"localctx accessed but self._local isn't set"
392 392 raise error.ProgrammingError(msg)
393 393 return self._repo[self._local]
394 394
395 395 @util.propertycache
396 396 def otherctx(self):
397 397 if self._other is None:
398 398 msg = b"otherctx accessed but self._other isn't set"
399 399 raise error.ProgrammingError(msg)
400 400 return self._repo[self._other]
401 401
402 402 def active(self):
403 403 """Whether mergestate is active.
404 404
405 405 Returns True if there appears to be mergestate. This is a rough proxy
406 406 for "is a merge in progress."
407 407 """
408 408 # Check local variables before looking at filesystem for performance
409 409 # reasons.
410 410 return (
411 411 bool(self._local)
412 412 or bool(self._state)
413 413 or self._repo.vfs.exists(self.statepathv1)
414 414 or self._repo.vfs.exists(self.statepathv2)
415 415 )
416 416
417 417 def commit(self):
418 418 """Write current state on disk (if necessary)"""
419 419 if self._dirty:
420 420 records = self._makerecords()
421 421 self._writerecords(records)
422 422 self._dirty = False
423 423
    def _makerecords(self):
        """Serialize in-memory state into a list of (TYPE, data) records.

        The RECORD_LOCAL entry must come first: ``_writerecordsv1`` asserts
        that ordering when emitting the v1 file.
        """
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append(
                (
                    RECORD_MERGE_DRIVER_STATE,
                    b'\0'.join([self.mergedriver, self._mdstate]),
                )
            )
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(
                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
                )
            elif v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records.  The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            # extras dict flattened to "k\0v\0k\0v..." for the 'f' record
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
477 477
478 478 def _writerecords(self, records):
479 479 """Write current state on disk (both v1 and v2)"""
480 480 self._writerecordsv1(records)
481 481 self._writerecordsv2(records)
482 482
483 483 def _writerecordsv1(self, records):
484 484 """Write current state on disk in a version 1 file"""
485 485 f = self._repo.vfs(self.statepathv1, b'wb')
486 486 irecords = iter(records)
487 487 lrecords = next(irecords)
488 488 assert lrecords[0] == RECORD_LOCAL
489 489 f.write(hex(self._local) + b'\n')
490 490 for rtype, data in irecords:
491 491 if rtype == RECORD_MERGED:
492 492 f.write(b'%s\n' % _droponode(data))
493 493 f.close()
494 494
495 495 def _writerecordsv2(self, records):
496 496 """Write current state on disk in a version 2 file
497 497
498 498 See the docstring for _readrecordsv2 for why we use 't'."""
499 499 # these are the records that all version 2 clients can read
500 500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 501 f = self._repo.vfs(self.statepathv2, b'wb')
502 502 for key, data in records:
503 503 assert len(key) == 1
504 504 if key not in allowlist:
505 505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 506 format = b'>sI%is' % len(data)
507 507 f.write(_pack(format, key, len(data), data))
508 508 f.close()
509 509
510 510 @staticmethod
511 511 def getlocalkey(path):
512 512 """hash the path of a local file context for storage in the .hg/merge
513 513 directory."""
514 514
515 515 return hex(hashlib.sha1(path).digest())
516 516
    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # no local content to stash; mark with the null hash
            localkey = nullhex
        else:
            # stash local content so _resolve() can restore it on re-runs
            localkey = mergestate.getlocalkey(fcl.path())
            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
        # field order is a serialization contract shared with _read() and
        # _makerecords(); do not reorder
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
        self._dirty = True
543 543
544 544 def addpath(self, path, frename, forigin):
545 545 """add a new conflicting path to the merge state
546 546 path: the path that conflicts
547 547 frename: the filename the conflicting file was renamed to
548 548 forigin: origin of the file ('l' or 'r' for local/remote)
549 549 """
550 550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 551 self._dirty = True
552 552
553 553 def __contains__(self, dfile):
554 554 return dfile in self._state
555 555
556 556 def __getitem__(self, dfile):
557 557 return self._state[dfile][0]
558 558
559 559 def __iter__(self):
560 560 return iter(sorted(self._state))
561 561
562 562 def files(self):
563 563 return self._state.keys()
564 564
565 565 def mark(self, dfile, state):
566 566 self._state[dfile][0] = state
567 567 self._dirty = True
568 568
569 569 def mdstate(self):
570 570 return self._mdstate
571 571
572 572 def unresolved(self):
573 573 """Obtain the paths of unresolved files."""
574 574
575 575 for f, entry in pycompat.iteritems(self._state):
576 576 if entry[0] in (
577 577 MERGE_RECORD_UNRESOLVED,
578 578 MERGE_RECORD_UNRESOLVED_PATH,
579 579 ):
580 580 yield f
581 581
582 582 def driverresolved(self):
583 583 """Obtain the paths of driver-resolved files."""
584 584
585 585 for f, entry in self._state.items():
586 586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 587 yield f
588 588
589 589 def extras(self, filename):
590 590 return self._stateextras.setdefault(filename, {})
591 591
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        Returns (complete, exit code).  When ``preresolve`` is True the
        stashed local content is restored and ``filemerge.premerge`` runs;
        otherwise ``filemerge.filemerge`` runs.
        """
        # already settled entries short-circuit with success
        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _(
                            b'warning: cannot merge flags for %s '
                            b'without common ancestor - keeping local flags\n'
                        )
                        % afile
                    )
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if localkey != nullhex:
                f = self._repo.vfs(b'merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        else:
            complete, r, deleted = filemerge.filemerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            # derive the dirstate action to record for this file
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent():  # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent():  # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
683 683
684 684 def _filectxorabsent(self, hexnode, ctx, f):
685 685 if hexnode == nullhex:
686 686 return filemerge.absentfilectx(ctx, f)
687 687 else:
688 688 return ctx[f]
689 689
690 690 def preresolve(self, dfile, wctx):
691 691 """run premerge process for dfile
692 692
693 693 Returns whether the merge is complete, and the exit code."""
694 694 return self._resolve(True, dfile, wctx)
695 695
696 696 def resolve(self, dfile, wctx):
697 697 """run merge process (assuming premerge was run) for dfile
698 698
699 699 Returns the exit code of the merge."""
700 700 return self._resolve(False, dfile, wctx)[1]
701 701
702 702 def counts(self):
703 703 """return counts for updated, merged and removed files in this
704 704 session"""
705 705 updated, merged, removed = 0, 0, 0
706 706 for r, action in pycompat.itervalues(self._results):
707 707 if r is None:
708 708 updated += 1
709 709 elif r == 0:
710 710 if action == ACTION_REMOVE:
711 711 removed += 1
712 712 else:
713 713 merged += 1
714 714 return updated, merged, removed
715 715
716 716 def unresolvedcount(self):
717 717 """get unresolved count for this merge (persistent)"""
718 718 return len(list(self.unresolved()))
719 719
720 720 def actions(self):
721 721 """return lists of actions to perform on the dirstate"""
722 722 actions = {
723 723 ACTION_REMOVE: [],
724 724 ACTION_FORGET: [],
725 725 ACTION_ADD: [],
726 726 ACTION_ADD_MODIFIED: [],
727 727 ACTION_GET: [],
728 728 }
729 729 for f, (r, action) in pycompat.iteritems(self._results):
730 730 if action is not None:
731 731 actions[action].append((f, None, b"merge result"))
732 732 return actions
733 733
734 734 def recordactions(self):
735 735 """record remove/add/get actions in the dirstate"""
736 736 branchmerge = self._repo.dirstate.p2() != nullid
737 737 recordupdates(self._repo, self.actions(), branchmerge, None)
738 738
739 739 def queueremove(self, f):
740 740 """queues a file to be removed from the dirstate
741 741
742 742 Meant for use by custom merge drivers."""
743 743 self._results[f] = 0, ACTION_REMOVE
744 744
745 745 def queueadd(self, f):
746 746 """queues a file to be added to the dirstate
747 747
748 748 Meant for use by custom merge drivers."""
749 749 self._results[f] = 0, ACTION_ADD
750 750
751 751 def queueget(self, f):
752 752 """queues a file to be marked modified in the dirstate
753 753
754 754 Meant for use by custom merge drivers."""
755 755 self._results[f] = 0, ACTION_GET
756 756
757 757
def _getcheckunknownconfig(repo, section, name):
    """Read ``section.name`` and validate it is one of abort/ignore/warn."""
    config = repo.ui.config(section, name)
    valid = [b'abort', b'ignore', b'warn']
    if config in valid:
        return config
    validstr = b', '.join([b"'" + v + b"'" for v in valid])
    raise error.ConfigError(
        _(b"%s.%s not valid ('%s' is none of %s)")
        % (section, name, config, validstr)
    )
768 768
769 769
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return True if working-copy file ``f`` is an untracked file whose
    content differs from ``mctx[f2]`` (``f2`` defaults to ``f``).
    """
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    # NOTE: short-circuit order matters — the path must pass the audit
    # check before any filesystem access, and only untracked files are
    # compared by content (cmp is the expensive step, so it runs last).
    return (
        repo.wvfs.audit.check(f)
        and repo.wvfs.isfileorlink(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f])
    )
787 787
788 788
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the conflicting path, or None (sometimes via a bare
        # ``return``) when there is no conflict.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    # prefix exists as an untracked file/link -> conflict
                    return p
                if not repo.wvfs.lexists(p):
                    # prefix absent: no deeper prefix can conflict either
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
843 843
844 844
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
                elif mergeforce or config == b'abort':
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
                elif config == b'abort':
                    # NOTE(review): unreachable — the preceding branch already
                    # handles config == b'abort' (see table note (1) above).
                    abortconflicts.add(f)
                else:
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    # rewrite surviving 'c' (created) actions as 'g' (get), recording
    # whether a backup of conflicting untracked content is needed
    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == ACTION_CREATED:
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
956 956
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    # On a branch merge a deleted file becomes a removal; on a plain
    # update it is simply forgotten.
    deletedaction = ACTION_REMOVE if branchmerge else ACTION_FORGET
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = (deletedaction, None, b"forget deleted")

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = (ACTION_FORGET, None, b"forget removed")

    return actions
986 986
987 987
def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.

    ``wmf`` is the working-copy manifest; ``actions`` is the merge action
    dict (or None).  Builds a provisional post-merge manifest and raises
    error.Abort if two of its paths (or a path and a directory prefix)
    would fold to the same name on a case-insensitive filesystem.
    """

    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        wmf = wmf.matches(narrowmatch)
        if actions:
            # Keep only actions for files inside the narrowspec.
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions

    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
        ):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            # args is (source file, flags): the source disappears, the
            # destination appears.
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories: walking the fold map in sorted
    # order puts a directory's files right after the directory name, so a
    # prefix match on the folded name with a mismatch on the real name
    # means the casing differs.
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
1058 1058
1059 1059
def driverpreprocess(repo, ms, wctx, labels=None):
    """Invoke the merge driver's preprocess phase, if one is configured.

    Core has no merge-driver implementation; this is an extension point.
    A True return value means the merge may proceed.
    """
    return True
1065 1065
1066 1066
def driverconclude(repo, ms, wctx, labels=None):
    """Invoke the merge driver's conclude phase, if one is configured.

    Core has no merge-driver implementation; this is an extension point.
    A True return value means the merge completed successfully.
    """
    return True
1072 1072
1073 1073
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for filename in manifest:
        # Report only the first (innermost-to-outermost) matching parent,
        # mirroring the walk order of pathutil.finddirs.
        match = next(
            (d for d in pathutil.finddirs(filename) if d in dirs), None
        )
        if match is not None:
            yield filename, match
1085 1085
1086 1086
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    Mutates ``actions`` in place: conflicting local files get
    ACTION_PATH_CONFLICT / ACTION_PATH_CONFLICT_RESOLVE entries, and remote
    create actions that collide with local directories are rewritten.
    Raises error.Abort if the remote manifest itself is inconsistent.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = (
                ACTION_PATH_CONFLICT_RESOLVE,
                (p,),
                b'local path conflict',
            )
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    ACTION_PATH_CONFLICT,
                    (pnew, ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # NOTE(review): this break means only the first surviving
                # conflict is rewritten per call — presumably later passes
                # or the abort below catch the rest; confirm upstream intent.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))
1202 1202
1203 1203
1204 1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 1205 """
1206 1206 Filters out actions that can ignored because the repo is narrowed.
1207 1207
1208 1208 Raise an exception if the merge cannot be completed because the repo is
1209 1209 narrowed.
1210 1210 """
1211 1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 1213 # We mutate the items in the dict during iteration, so iterate
1214 1214 # over a copy.
1215 1215 for f, action in list(actions.items()):
1216 1216 if narrowmatch(f):
1217 1217 pass
1218 1218 elif not branchmerge:
1219 1219 del actions[f] # just updating, ignore changes outside clone
1220 1220 elif action[0] in nooptypes:
1221 1221 del actions[f] # merge does not affect file
1222 1222 elif action[0] in nonconflicttypes:
1223 1223 raise error.Abort(
1224 1224 _(
1225 1225 b'merge affects file \'%s\' outside narrow, '
1226 1226 b'which is not yet supported'
1227 1227 )
1228 1228 % f,
1229 1229 hint=_(b'merging in the other direction may work'),
1230 1230 )
1231 1231 else:
1232 1232 raise error.Abort(
1233 1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 1234 )
1235 1235
1236 1236
def manifestmerge(
    repo,
    wctx,
    p2,
    pa,
    branchmerge,
    force,
    matcher,
    acceptremote,
    followcopies,
    forcefulldiff=False,
):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    forcefulldiff = disable the m2-vs-ma diff-narrowing optimization (used
                    during bid merge so every ancestor sees the same files)

    Returns a tuple (actions, diverge, renamedelete) where actions maps
    filename -> (action type, args, message).
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [
        x.manifest()
        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
    ]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_(b"resolving manifests\n"))
    repo.ui.debug(
        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
    )
    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if b'.hgsubstate' in m1 and wctx.rev() is None:
        # Check whether sub state is modified, and overwrite the manifest
        # to flag the change. If wctx is a committed revision, we shouldn't
        # care for the dirty state of the working directory.
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1[b'.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during
    #   bid merge.
    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in pycompat.iteritems(copy):
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    # Walk the diff and decide one action per changed file.  (n1, fl1) is
    # the local node/flags, (n2, fl2) the remote's; either side may be None.
    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
        if n1 and n2:  # file exists on both local and remote side
            if f not in ma:
                # Not in the ancestor: both sides created it (possibly via
                # a rename recorded in `copy`).
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, fa, False, pa.node()),
                        b'both renamed from %s' % fa,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, pa.node()),
                        b'both created',
                    )
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink flag involved on any of the three sides,
                # which makes flag-only updates safe to fast-path.
                nol = b'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = (ACTION_KEEP, (), b'remote unchanged')
                elif n1 == a and fl1 == fla:  # local unchanged - use remote
                    if n1 == n2:  # optimization: keep local content
                        actions[f] = (
                            ACTION_EXEC,
                            (fl2,),
                            b'update permissions',
                        )
                    else:
                        actions[f] = (
                            ACTION_GET,
                            (fl2, False),
                            b'remote is newer',
                        )
                elif nol and n2 == a:  # remote only changed 'x'
                    actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
                elif nol and n1 == a:  # local only changed 'x'
                    actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
                else:  # both changed something
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, f, False, pa.node()),
                        b'versions differ',
                    )
        elif n1:  # file exists only on local side
            if f in copied:
                pass  # we'll deal with it on m2 side
            elif f in movewithdir:  # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f, f2, None, True, pa.node()),
                        b'remote directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_DIR_RENAME_MOVE_LOCAL,
                        (f, fl1),
                        b'remote directory rename - move from %s' % f,
                    )
            elif f in copy:
                f2 = copy[f]
                actions[f] = (
                    ACTION_MERGE,
                    (f, f2, f2, False, pa.node()),
                    b'local copied/moved from %s' % f2,
                )
            elif f in ma:  # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = (ACTION_REMOVE, None, b'remote delete')
                    else:
                        actions[f] = (
                            ACTION_CHANGED_DELETED,
                            (f, None, f, False, pa.node()),
                            b'prompt changed/deleted',
                        )
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = (ACTION_FORGET, None, b'remote deleted')
                else:
                    actions[f] = (ACTION_REMOVE, None, b'other deleted')
        elif n2:  # file exists only on remote side
            if f in copied:
                pass  # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f2, f, None, False, pa.node()),
                        b'local directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (f, fl2),
                        b'local directory rename - get from %s' % f,
                    )
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, False, pa.node()),
                        b'remote copied from %s' % f2,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, True, pa.node()),
                        b'remote moved from %s' % f2,
                    )
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                elif not branchmerge:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                else:
                    actions[f] = (
                        ACTION_CREATED_MERGE,
                        (fl2, pa.node()),
                        b'remote created, get or merge',
                    )
            elif n2 != ma[f]:
                # Existed in the ancestor, deleted locally, changed remotely.
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d) :]
                        break
                if df is not None and df in m1:
                    actions[df] = (
                        ACTION_MERGE,
                        (df, f, f, False, pa.node()),
                        b'local directory rename - respect move '
                        b'from %s' % f,
                    )
                elif acceptremote:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
                else:
                    actions[f] = (
                        ACTION_DELETED_CHANGED,
                        (None, f, f, False, pa.node()),
                        b'prompt deleted/changed',
                    )

    if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates "actions" in place
        _filternarrowactions(narrowmatch, branchmerge, actions)

    return actions, diverge, renamedelete
1485 1485
1486 1486
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # Snapshot the items up front: we mutate `actions` while scanning.
    for f, (m, args, msg) in list(actions.items()):
        if m == ACTION_CHANGED_DELETED:
            if f in ancestor and not wctx[f].cmp(ancestor[f]):
                # local did change but ended up with same content
                actions[f] = ACTION_REMOVE, None, b'prompt same'
        elif m == ACTION_DELETED_CHANGED:
            if f in ancestor and not mctx[f].cmp(ancestor[f]):
                # remote did change but ended up with same content
                del actions[f]  # don't get = keep local deleted
1507 1507
1508 1508
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a straight manifestmerge().  With several
    (merge.preferancestor=*) each ancestor produces a "bid" per file and an
    auction picks one action per file.

    Returns (prunedactions, diverge, renamedelete).
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # Record this ancestor's action for each file as one bid.
            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b'  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    # NOTE(review): _resolvetrivial mutates `actions` after prunedactions
    # was computed — presumably filterupdatesactions returns the same dict
    # when sparse is inactive; confirm the intended ordering.
    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete
1638 1638
1639 1639
def _getcwd():
    """Return the current working directory, or None if it no longer exists."""
    try:
        return encoding.getcwd()
    except OSError as err:
        # Only a vanished cwd (ENOENT) is expected; anything else is real.
        if err.errno != errno.ENOENT:
            raise
        return None
1647 1647
1648 1648
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    ``actions`` is an iterable of (file, args, msg) remove actions.
    yields tuples (count, file) for progress updates, one per batch of
    roughly 100 processed files plus one final partial batch.
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            # Best effort: report the failure but keep removing the rest.
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        # Batch progress reports: flush a batch of 100, then keep counting
        # the current file (note the increment happens after the check).
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1685 1685
1686 1686
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            # Drop any stale "unknown file" state before overwriting.
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            # Batch progress reports in chunks of ~100 files.
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata
1742 1742
1743 1743
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevant = (
        ACTION_GET,
        ACTION_DELETED_CHANGED,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
    )
    # Flatten the per-action lists into a single file list.
    files = [f for a in relevant for f, args, msg in actions[a]]
    scmutil.prefetchfiles(
        repo, [ctx.rev()], scmutil.matchfiles(repo, files)
    )
1767 1767
1768 1768
@attr.s(frozen=True)
class updateresult(object):
    """Immutable summary of how many files an update/merge affected."""

    updatedcount = attr.ib()  # files gotten/updated from the other side
    mergedcount = attr.ib()  # files merged
    removedcount = attr.ib()  # files removed
    unresolvedcount = attr.ib()  # files left with unresolved conflicts

    def isempty(self):
        """Return True if the update touched nothing (all counts falsy)."""
        return not (
            self.updatedcount
            or self.mergedcount
            or self.removedcount
            or self.unresolvedcount
        )
1783 1783
1784 1784
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()

    Maps every known merge action type to a fresh, independent empty list
    of (file, args, msg) tuples.
    """
    # A dict comprehension is the idiomatic form of dict((k, v) for ...)
    # (flake8-comprehensions C402); behavior is unchanged.
    return {
        m: []
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
            ACTION_REMOVE,
            ACTION_DIR_RENAME_MOVE_LOCAL,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
            ACTION_EXEC,
            ACTION_KEEP,
            ACTION_PATH_CONFLICT,
            ACTION_PATH_CONFLICT_RESOLVE,
        )
    }
1806 1806
1807 1807
def applyupdates(
    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    ``actions`` maps action constants to lists of (file, args, msg) tuples
    (see emptyactions()). ``overwrite`` bypasses merge-driver ordering
    checks; ``wantfiledata`` requests cached stat data for gotten files.

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # sort in place so every per-action loop below sees deterministic order
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[ACTION_MERGE])
    # register every conflicting file with the merge state before touching
    # the working directory, so an interrupted merge can be resumed
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0,) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        actions[ACTION_GET],
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    # hasretval=True: the worker yields (False, (step, item)) progress pairs
    # and one final (True, retval) carrying the collected file data
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[ACTION_GET])

    if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[ACTION_FORGET]:
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD]:
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD_MODIFIED]:
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[ACTION_KEEP]:
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[ACTION_EXEC]:
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        # always persist merge state so an interrupted merge can be resumed
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions[ACTION_MERGE])
        for k, acts in pycompat.iteritems(extraactions):
            actions[k].extend(acts)
            if k == ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[ACTION_MERGE] = [
            a for a in actions[ACTION_MERGE] if a[0] in mfiles
        ]

    progress.complete()
    assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
    return updateresult(updated, merged, removed, unresolved), getfiledata
2093 2093
2094 2094
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate

    ``actions`` is the action dict produced by applyupdates();
    ``branchmerge`` selects whether files are recorded as merge results
    (two-parent states) or as plain checked-out files; ``getfiledata`` is
    the optional file -> stat-data map from applyupdates(), used to prime
    dirstate entries on non-merge updates.

    Fix: the summary line above used to be the bytes literal
    b"record merge actions to the dirstate", which Python 3 does not treat
    as a docstring (docstrings must be str) -- it was a no-op expression
    and the function had no __doc__.
    """
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = args
        # follow an existing copy chain so the copy source stays the original
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
2190 2190
2191 2191
# Valid values for the updatecheck argument of update() below; they control
# how a dirty working directory is handled.
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
2196 2196
2197 2197
def update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c -C -n -m dirty rev linear | result
     y  y  *  *    *   *    *    |  (1)
     y  *  y  *    *   *    *    |  (1)
     y  *  *  y    *   *    *    |  (1)
     *  y  y  *    *   *    *    |  (1)
     *  y  *  y    *   *    *    |  (1)
     *  *  y  y    *   *    *    |  (1)
     *  *  *  *    *   n    n    |   x
     *  *  *  *    n   *    *    |  ok
     n  n  n  n    y   *    y    | merge
     n  n  n  n    y   y    n    |  (2)
     n  n  n  y    y   *    *    | merge
     n  n  y  n    y   *    *    | merge if no conflict
     n  y  n  n    y   *    *    | discard
     y  n  n  n    y   *    *    |  (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better suppport some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        if updatecheck not in (
            UPDATECHECK_NONE,
            UPDATECHECK_LINEAR,
            UPDATECHECK_NO_CONFLICT,
        ):
            raise ValueError(
                r'Invalid updatecheck %r (can accept %r)'
                % (
                    updatecheck,
                    (
                        UPDATECHECK_NONE,
                        UPDATECHECK_LINEAR,
                        UPDATECHECK_NO_CONFLICT,
                    ),
                )
            )
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_(b"outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(
                    _(b"outstanding merge conflicts"),
                    hint=_(b"use 'hg resolve' to resolve"),
                )
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(
                    _(
                        b"merging with a working directory ancestor"
                        b" has no effect"
                    )
                )
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(
                        _(b"nothing to merge"),
                        hint=_(b"use 'hg update' or check 'hg heads'"),
                    )
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(
                    _(b"uncommitted changes"),
                    hint=_(b"use 'hg status' to list changes"),
                )
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            # reject anything that isn't a trivially-safe action
            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
                if m not in (
                    ACTION_GET,
                    ACTION_KEEP,
                    ACTION_EXEC,
                    ACTION_REMOVE,
                    ACTION_PATH_CONFLICT_RESOLVE,
                ):
                    msg = _(b"conflicting changes")
                    hint = _(b"commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if b'.hgsubstate' in actionbyfile:
            f = b'.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
                elif f in p1:
                    actionbyfile[f] = (
                        ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
            elif m == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    actionbyfile[f] = (
                        ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        if not partial and not wc.isinmemory():
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint(
            b'fsmonitor', b'warn_update_file_count'
        )
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions

            extensions.find(b'fsmonitor')
            fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (
            fsmonitorwarning
            and not fsmonitorenabled
            and p1.node() == nullid
            and len(actions[ACTION_GET]) >= fsmonitorthreshold
            and pycompat.sysplatform.startswith((b'linux', b'darwin'))
        ):
            repo.ui.warn(
                _(
                    b'(warning: large working directory being used without '
                    b'fsmonitor enabled; enable fsmonitor to improve performance; '
                    b'see "hg help -e fsmonitor")\n'
                )
            )

        updatedirstate = not partial and not wc.isinmemory()
        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge, getfiledata)
                # update completed, clear state
                util.unlink(repo.vfs.join(b'updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

        if not partial:
            repo.hook(
                b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
            )
        return stats
2582 2582
2583 2583
def graft(
    repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    destisancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        pctx.node(),
        mergeancestor=destisancestor,
        labels=labels,
    )

    # Decide what the working copy's second parent should become.
    secondparent = nullid
    if keepconflictparent and stats.unresolvedcount:
        secondparent = ctx.node()
    else:
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and pctx in parents:
            parents.remove(pctx)
            secondparent = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo[b'.'].node(), secondparent)
        repo.dirstate.write(repo.currenttransaction())
    # fix up dirstate for copies and renames
    copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
2635 2635
2636 2636
def purge(
    repo,
    matcher,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``ignored`` controls whether ignored files should also be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If not defined, actions
    will be taken.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        # Best-effort removal: turn an OSError into a warning, or an Abort
        # when the caller asked for abortonerror.
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            # collect every directory the status walk traverses so empty
            # ones can be removed afterwards
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=True)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            # deepest-first (reverse-sorted) so children go before parents
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now