merge: cache the fs checks made during [_checkunknownfiles]...
Arseniy Alekseyev
r50784:c7624b1a default
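This revision wraps the unknown-file scan in _checkunknownfiles() in a `with repo.wvfs.audit.cached():` block (see the hunk below), so the path-audit filesystem checks made while looking for conflicting unknown files are served from a cache for the duration of the loop instead of being repeated for every path prefix.

As a rough sketch of the caching pattern only (not Mercurial's actual auditor implementation; the PathAuditor class, its _expensive_check() helper and the usage names below are hypothetical illustrations), a cached() context manager can install a memoization dict on entry and discard it on exit:

    import contextlib
    import os


    class PathAuditor:
        """Hypothetical stand-in for a path auditor with memoizable checks."""

        def __init__(self, root):
            self.root = root
            self._cache = None  # no caching outside the cached() block

        def check(self, path):
            # Serve repeated checks from the cache while one is active.
            if self._cache is not None and path in self._cache:
                return self._cache[path]
            result = self._expensive_check(path)
            if self._cache is not None:
                self._cache[path] = result
            return result

        def _expensive_check(self, path):
            # Placeholder for the real filesystem work (lstat, symlink and
            # case-folding checks, ...).
            return not os.path.islink(os.path.join(self.root, path))

        @contextlib.contextmanager
        def cached(self):
            # Install a memoization dict for the duration of the block, then
            # drop it so later calls see fresh filesystem state again.
            self._cache = {}
            try:
                yield
            finally:
                self._cache = None


    # Usage mirroring the shape of the change in the hunk below:
    #   auditor = PathAuditor('/path/to/repo')
    #   with auditor.cached():
    #       for f in files_to_check:
    #           auditor.check(f)

Scoping the cache to a context manager, rather than caching unconditionally, keeps the memoized results trusted only while a single _checkunknownfiles() pass holds them; filesystem state may change between merge operations.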
@@ -1,2491 +1,2492 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import struct
11 11
12 12 from .i18n import _
13 13 from .node import nullrev
14 14 from .thirdparty import attr
15 15 from .utils import stringutil
16 16 from .dirstateutils import timestamp
17 17 from . import (
18 18 copies,
19 19 encoding,
20 20 error,
21 21 filemerge,
22 22 match as matchmod,
23 23 mergestate as mergestatemod,
24 24 obsutil,
25 25 pathutil,
26 26 policy,
27 27 pycompat,
28 28 scmutil,
29 29 subrepoutil,
30 30 util,
31 31 worker,
32 32 )
33 33
34 34 _pack = struct.pack
35 35 _unpack = struct.unpack
36 36
37 37
38 38 def _getcheckunknownconfig(repo, section, name):
39 39 config = repo.ui.config(section, name)
40 40 valid = [b'abort', b'ignore', b'warn']
41 41 if config not in valid:
42 42 validstr = b', '.join([b"'" + v + b"'" for v in valid])
43 43 msg = _(b"%s.%s not valid ('%s' is none of %s)")
44 44 msg %= (section, name, config, validstr)
45 45 raise error.ConfigError(msg)
46 46 return config
47 47
48 48
49 49 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
50 50 if wctx.isinmemory():
51 51 # Nothing to do in IMM because nothing in the "working copy" can be an
52 52 # unknown file.
53 53 #
54 54 # Note that we should bail out here, not in ``_checkunknownfiles()``,
55 55 # because that function does other useful work.
56 56 return False
57 57
58 58 if f2 is None:
59 59 f2 = f
60 60 return (
61 61 repo.wvfs.audit.check(f)
62 62 and repo.wvfs.isfileorlink(f)
63 63 and repo.dirstate.normalize(f) not in repo.dirstate
64 64 and mctx[f2].cmp(wctx[f])
65 65 )
66 66
67 67
68 68 class _unknowndirschecker:
69 69 """
70 70 Look for any unknown files or directories that may have a path conflict
71 71 with a file. If any path prefix of the file exists as a file or link,
72 72 then it conflicts. If the file itself is a directory that contains any
73 73 file that is not tracked, then it conflicts.
74 74
75 75 Returns the shortest path at which a conflict occurs, or None if there is
76 76 no conflict.
77 77 """
78 78
79 79 def __init__(self):
80 80 # A set of paths known to be good. This prevents repeated checking of
81 81 # dirs. It will be updated with any new dirs that are checked and found
82 82 # to be safe.
83 83 self._unknowndircache = set()
84 84
85 85 # A set of paths that are known to be absent. This prevents repeated
86 86 # checking of subdirectories that are known not to exist. It will be
87 87 # updated with any new dirs that are checked and found to be absent.
88 88 self._missingdircache = set()
89 89
90 90 def __call__(self, repo, wctx, f):
91 91 if wctx.isinmemory():
92 92 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
93 93 return False
94 94
95 95 # Check for path prefixes that exist as unknown files.
96 96 for p in reversed(list(pathutil.finddirs(f))):
97 97 if p in self._missingdircache:
98 98 return
99 99 if p in self._unknowndircache:
100 100 continue
101 101 if repo.wvfs.audit.check(p):
102 102 if (
103 103 repo.wvfs.isfileorlink(p)
104 104 and repo.dirstate.normalize(p) not in repo.dirstate
105 105 ):
106 106 return p
107 107 if not repo.wvfs.lexists(p):
108 108 self._missingdircache.add(p)
109 109 return
110 110 self._unknowndircache.add(p)
111 111
112 112 # Check if the file conflicts with a directory containing unknown files.
113 113 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
114 114 # Does the directory contain any files that are not in the dirstate?
115 115 for p, dirs, files in repo.wvfs.walk(f):
116 116 for fn in files:
117 117 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
118 118 relf = repo.dirstate.normalize(relf, isknown=True)
119 119 if relf not in repo.dirstate:
120 120 return f
121 121 return None
122 122
123 123
124 124 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
125 125 """
126 126 Considers any actions that care about the presence of conflicting unknown
127 127 files. For some actions, the result is to abort; for others, it is to
128 128 choose a different action.
129 129 """
130 130 fileconflicts = set()
131 131 pathconflicts = set()
132 132 warnconflicts = set()
133 133 abortconflicts = set()
134 134 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
135 135 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
136 136 pathconfig = repo.ui.configbool(
137 137 b'experimental', b'merge.checkpathconflicts'
138 138 )
139 139 if not force:
140 140
141 141 def collectconflicts(conflicts, config):
142 142 if config == b'abort':
143 143 abortconflicts.update(conflicts)
144 144 elif config == b'warn':
145 145 warnconflicts.update(conflicts)
146 146
147 147 checkunknowndirs = _unknowndirschecker()
148 for f in mresult.files(
149 (
150 mergestatemod.ACTION_CREATED,
151 mergestatemod.ACTION_DELETED_CHANGED,
152 )
153 ):
154 if _checkunknownfile(repo, wctx, mctx, f):
155 fileconflicts.add(f)
156 elif pathconfig and f not in wctx:
157 path = checkunknowndirs(repo, wctx, f)
158 if path is not None:
159 pathconflicts.add(path)
148 with repo.wvfs.audit.cached():
149 for f in mresult.files(
150 (
151 mergestatemod.ACTION_CREATED,
152 mergestatemod.ACTION_DELETED_CHANGED,
153 )
154 ):
155 if _checkunknownfile(repo, wctx, mctx, f):
156 fileconflicts.add(f)
157 elif pathconfig and f not in wctx:
158 path = checkunknowndirs(repo, wctx, f)
159 if path is not None:
160 pathconflicts.add(path)
160 161 for f, args, msg in mresult.getactions(
161 162 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
162 163 ):
163 164 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
164 165 fileconflicts.add(f)
165 166
166 167 allconflicts = fileconflicts | pathconflicts
167 168 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
168 169 unknownconflicts = allconflicts - ignoredconflicts
169 170 collectconflicts(ignoredconflicts, ignoredconfig)
170 171 collectconflicts(unknownconflicts, unknownconfig)
171 172 else:
172 173 for f, args, msg in list(
173 174 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
174 175 ):
175 176 fl2, anc = args
176 177 different = _checkunknownfile(repo, wctx, mctx, f)
177 178 if repo.dirstate._ignore(f):
178 179 config = ignoredconfig
179 180 else:
180 181 config = unknownconfig
181 182
182 183 # The behavior when force is True is described by this table:
183 184 # config different mergeforce | action backup
184 185 # * n * | get n
185 186 # * y y | merge -
186 187 # abort y n | merge - (1)
187 188 # warn y n | warn + get y
188 189 # ignore y n | get y
189 190 #
190 191 # (1) this is probably the wrong behavior here -- we should
191 192 # probably abort, but some actions like rebases currently
192 193 # don't like an abort happening in the middle of
193 194 # merge.update.
194 195 if not different:
195 196 mresult.addfile(
196 197 f,
197 198 mergestatemod.ACTION_GET,
198 199 (fl2, False),
199 200 b'remote created',
200 201 )
201 202 elif mergeforce or config == b'abort':
202 203 mresult.addfile(
203 204 f,
204 205 mergestatemod.ACTION_MERGE,
205 206 (f, f, None, False, anc),
206 207 b'remote differs from untracked local',
207 208 )
208 209 elif config == b'abort':
209 210 abortconflicts.add(f)
210 211 else:
211 212 if config == b'warn':
212 213 warnconflicts.add(f)
213 214 mresult.addfile(
214 215 f,
215 216 mergestatemod.ACTION_GET,
216 217 (fl2, True),
217 218 b'remote created',
218 219 )
219 220
220 221 for f in sorted(abortconflicts):
221 222 warn = repo.ui.warn
222 223 if f in pathconflicts:
223 224 if repo.wvfs.isfileorlink(f):
224 225 warn(_(b"%s: untracked file conflicts with directory\n") % f)
225 226 else:
226 227 warn(_(b"%s: untracked directory conflicts with file\n") % f)
227 228 else:
228 229 warn(_(b"%s: untracked file differs\n") % f)
229 230 if abortconflicts:
230 231 raise error.StateError(
231 232 _(
232 233 b"untracked files in working directory "
233 234 b"differ from files in requested revision"
234 235 )
235 236 )
236 237
237 238 for f in sorted(warnconflicts):
238 239 if repo.wvfs.isfileorlink(f):
239 240 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
240 241 else:
241 242 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
242 243
243 244 for f, args, msg in list(
244 245 mresult.getactions([mergestatemod.ACTION_CREATED])
245 246 ):
246 247 backup = (
247 248 f in fileconflicts
248 249 or pathconflicts
249 250 and (
250 251 f in pathconflicts
251 252 or any(p in pathconflicts for p in pathutil.finddirs(f))
252 253 )
253 254 )
254 255 (flags,) = args
255 256 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256 257
257 258
258 259 def _forgetremoved(wctx, mctx, branchmerge, mresult):
259 260 """
260 261 Forget removed files
261 262
262 263 If we're jumping between revisions (as opposed to merging), and if
263 264 neither the working directory nor the target rev has the file,
264 265 then we need to remove it from the dirstate, to prevent the
265 266 dirstate from listing the file when it is no longer in the
266 267 manifest.
267 268
268 269 If we're merging, and the other revision has removed a file
269 270 that is not present in the working directory, we need to mark it
270 271 as removed.
271 272 """
272 273
273 274 m = mergestatemod.ACTION_FORGET
274 275 if branchmerge:
275 276 m = mergestatemod.ACTION_REMOVE
276 277 for f in wctx.deleted():
277 278 if f not in mctx:
278 279 mresult.addfile(f, m, None, b"forget deleted")
279 280
280 281 if not branchmerge:
281 282 for f in wctx.removed():
282 283 if f not in mctx:
283 284 mresult.addfile(
284 285 f,
285 286 mergestatemod.ACTION_FORGET,
286 287 None,
287 288 b"forget removed",
288 289 )
289 290
290 291
291 292 def _checkcollision(repo, wmf, mresult):
292 293 """
293 294 Check for case-folding collisions.
294 295 """
295 296 # If the repo is narrowed, filter out files outside the narrowspec.
296 297 narrowmatch = repo.narrowmatch()
297 298 if not narrowmatch.always():
298 299 pmmf = set(wmf.walk(narrowmatch))
299 300 if mresult:
300 301 for f in list(mresult.files()):
301 302 if not narrowmatch(f):
302 303 mresult.removefile(f)
303 304 else:
304 305 # build provisional merged manifest up
305 306 pmmf = set(wmf)
306 307
307 308 if mresult:
308 309 # KEEP and EXEC are no-op
309 310 for f in mresult.files(
310 311 (
311 312 mergestatemod.ACTION_ADD,
312 313 mergestatemod.ACTION_ADD_MODIFIED,
313 314 mergestatemod.ACTION_FORGET,
314 315 mergestatemod.ACTION_GET,
315 316 mergestatemod.ACTION_CHANGED_DELETED,
316 317 mergestatemod.ACTION_DELETED_CHANGED,
317 318 )
318 319 ):
319 320 pmmf.add(f)
320 321 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
321 322 pmmf.discard(f)
322 323 for f, args, msg in mresult.getactions(
323 324 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
324 325 ):
325 326 f2, flags = args
326 327 pmmf.discard(f2)
327 328 pmmf.add(f)
328 329 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
329 330 pmmf.add(f)
330 331 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
331 332 f1, f2, fa, move, anc = args
332 333 if move:
333 334 pmmf.discard(f1)
334 335 pmmf.add(f)
335 336
336 337 # check case-folding collision in provisional merged manifest
337 338 foldmap = {}
338 339 for f in pmmf:
339 340 fold = util.normcase(f)
340 341 if fold in foldmap:
341 342 msg = _(b"case-folding collision between %s and %s")
342 343 msg %= (f, foldmap[fold])
343 344 raise error.StateError(msg)
344 345 foldmap[fold] = f
345 346
346 347 # check case-folding of directories
347 348 foldprefix = unfoldprefix = lastfull = b''
348 349 for fold, f in sorted(foldmap.items()):
349 350 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
350 351 # the folded prefix matches but actual casing is different
351 352 msg = _(b"case-folding collision between %s and directory of %s")
352 353 msg %= (lastfull, f)
353 354 raise error.StateError(msg)
354 355 foldprefix = fold + b'/'
355 356 unfoldprefix = f + b'/'
356 357 lastfull = f
357 358
358 359
359 360 def _filesindirs(repo, manifest, dirs):
360 361 """
361 362 Generator that yields pairs of all the files in the manifest that are found
362 363 inside the directories listed in dirs, and which directory they are found
363 364 in.
364 365 """
365 366 for f in manifest:
366 367 for p in pathutil.finddirs(f):
367 368 if p in dirs:
368 369 yield f, p
369 370 break
370 371
371 372
372 373 def checkpathconflicts(repo, wctx, mctx, mresult):
373 374 """
374 375 Check if any actions introduce path conflicts in the repository, updating
375 376 actions to record or handle the path conflict accordingly.
376 377 """
377 378 mf = wctx.manifest()
378 379
379 380 # The set of local files that conflict with a remote directory.
380 381 localconflicts = set()
381 382
382 383 # The set of directories that conflict with a remote file, and so may cause
383 384 # conflicts if they still contain any files after the merge.
384 385 remoteconflicts = set()
385 386
386 387 # The set of directories that appear as both a file and a directory in the
387 388 # remote manifest. These indicate an invalid remote manifest, which
388 389 # can't be updated to cleanly.
389 390 invalidconflicts = set()
390 391
391 392 # The set of directories that contain files that are being created.
392 393 createdfiledirs = set()
393 394
394 395 # The set of files deleted by all the actions.
395 396 deletedfiles = set()
396 397
397 398 for f in mresult.files(
398 399 (
399 400 mergestatemod.ACTION_CREATED,
400 401 mergestatemod.ACTION_DELETED_CHANGED,
401 402 mergestatemod.ACTION_MERGE,
402 403 mergestatemod.ACTION_CREATED_MERGE,
403 404 )
404 405 ):
405 406 # This action may create a new local file.
406 407 createdfiledirs.update(pathutil.finddirs(f))
407 408 if mf.hasdir(f):
408 409 # The file aliases a local directory. This might be ok if all
409 410 # the files in the local directory are being deleted. This
410 411 # will be checked once we know what all the deleted files are.
411 412 remoteconflicts.add(f)
412 413 # Track the names of all deleted files.
413 414 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
414 415 deletedfiles.add(f)
415 416 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
416 417 f1, f2, fa, move, anc = args
417 418 if move:
418 419 deletedfiles.add(f1)
419 420 for (f, args, msg) in mresult.getactions(
420 421 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
421 422 ):
422 423 f2, flags = args
423 424 deletedfiles.add(f2)
424 425
425 426 # Check all directories that contain created files for path conflicts.
426 427 for p in createdfiledirs:
427 428 if p in mf:
428 429 if p in mctx:
429 430 # A file is in a directory which aliases both a local
430 431 # and a remote file. This is an internal inconsistency
431 432 # within the remote manifest.
432 433 invalidconflicts.add(p)
433 434 else:
434 435 # A file is in a directory which aliases a local file.
435 436 # We will need to rename the local file.
436 437 localconflicts.add(p)
437 438 pd = mresult.getfile(p)
438 439 if pd and pd[0] in (
439 440 mergestatemod.ACTION_CREATED,
440 441 mergestatemod.ACTION_DELETED_CHANGED,
441 442 mergestatemod.ACTION_MERGE,
442 443 mergestatemod.ACTION_CREATED_MERGE,
443 444 ):
444 445 # The file is in a directory which aliases a remote file.
445 446 # This is an internal inconsistency within the remote
446 447 # manifest.
447 448 invalidconflicts.add(p)
448 449
449 450 # Rename all local conflicting files that have not been deleted.
450 451 for p in localconflicts:
451 452 if p not in deletedfiles:
452 453 ctxname = bytes(wctx).rstrip(b'+')
453 454 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
454 455 porig = wctx[p].copysource() or p
455 456 mresult.addfile(
456 457 pnew,
457 458 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
458 459 (p, porig),
459 460 b'local path conflict',
460 461 )
461 462 mresult.addfile(
462 463 p,
463 464 mergestatemod.ACTION_PATH_CONFLICT,
464 465 (pnew, b'l'),
465 466 b'path conflict',
466 467 )
467 468
468 469 if remoteconflicts:
469 470 # Check if all files in the conflicting directories have been removed.
470 471 ctxname = bytes(mctx).rstrip(b'+')
471 472 for f, p in _filesindirs(repo, mf, remoteconflicts):
472 473 if f not in deletedfiles:
473 474 m, args, msg = mresult.getfile(p)
474 475 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
475 476 if m in (
476 477 mergestatemod.ACTION_DELETED_CHANGED,
477 478 mergestatemod.ACTION_MERGE,
478 479 ):
479 480 # Action was merge, just update target.
480 481 mresult.addfile(pnew, m, args, msg)
481 482 else:
482 483 # Action was create, change to renamed get action.
483 484 fl = args[0]
484 485 mresult.addfile(
485 486 pnew,
486 487 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
487 488 (p, fl),
488 489 b'remote path conflict',
489 490 )
490 491 mresult.addfile(
491 492 p,
492 493 mergestatemod.ACTION_PATH_CONFLICT,
493 494 (pnew, b'r'),
494 495 b'path conflict',
495 496 )
496 497 remoteconflicts.remove(p)
497 498 break
498 499
499 500 if invalidconflicts:
500 501 for p in invalidconflicts:
501 502 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
502 503 raise error.StateError(
503 504 _(b"destination manifest contains path conflicts")
504 505 )
505 506
506 507
507 508 def _filternarrowactions(narrowmatch, branchmerge, mresult):
508 509 """
508 509 Filters out actions that can be ignored because the repo is narrowed.
510 511
511 512 Raise an exception if the merge cannot be completed because the repo is
512 513 narrowed.
513 514 """
514 515 # We mutate the items in the dict during iteration, so iterate
515 516 # over a copy.
516 517 for f, action in mresult.filemap():
517 518 if narrowmatch(f):
518 519 pass
519 520 elif not branchmerge:
520 521 mresult.removefile(f) # just updating, ignore changes outside clone
521 522 elif action[0].no_op:
522 523 mresult.removefile(f) # merge does not affect file
523 524 elif action[0].narrow_safe:
524 525 if not f.endswith(b'/'):
525 526 mresult.removefile(f) # merge won't affect on-disk files
526 527
527 528 mresult.addcommitinfo(
528 529 f, b'outside-narrow-merge-action', action[0].changes
529 530 )
530 531 else: # TODO: handle the tree case
531 532 msg = _(
532 533 b'merge affects file \'%s\' outside narrow, '
533 534 b'which is not yet supported'
534 535 )
535 536 hint = _(b'merging in the other direction may work')
536 537 raise error.Abort(msg % f, hint=hint)
537 538 else:
538 539 msg = _(b'conflict in file \'%s\' is outside narrow clone')
539 540 raise error.StateError(msg % f)
540 541
541 542
542 543 class mergeresult:
543 544 """An object representing result of merging manifests.
544 545
545 546 It has information about what actions need to be performed on dirstate
546 547 mapping of divergent renames and other such cases."""
547 548
548 549 def __init__(self):
549 550 """
550 551 filemapping: dict of filename as keys and action related info as values
551 552 diverge: mapping of source name -> list of dest name for
552 553 divergent renames
553 554 renamedelete: mapping of source name -> list of destinations for files
554 555 deleted on one side and renamed on other.
555 556 commitinfo: dict containing data which should be used on commit
556 557 contains a filename -> info mapping
557 558 actionmapping: dict of action names as keys and values are dict of
558 559 filename as key and related data as values
559 560 """
560 561 self._filemapping = {}
561 562 self._diverge = {}
562 563 self._renamedelete = {}
563 564 self._commitinfo = collections.defaultdict(dict)
564 565 self._actionmapping = collections.defaultdict(dict)
565 566
566 567 def updatevalues(self, diverge, renamedelete):
567 568 self._diverge = diverge
568 569 self._renamedelete = renamedelete
569 570
570 571 def addfile(self, filename, action, data, message):
571 572 """adds a new file to the mergeresult object
572 573
573 574 filename: file which we are adding
574 575 action: one of mergestatemod.ACTION_*
575 576 data: a tuple of information like fctx and ctx related to this merge
576 577 message: a message about the merge
577 578 """
578 579 # if the file already existed, we need to delete its old
579 580 # entry from _actionmapping too
580 581 if filename in self._filemapping:
581 582 a, d, m = self._filemapping[filename]
582 583 del self._actionmapping[a][filename]
583 584
584 585 self._filemapping[filename] = (action, data, message)
585 586 self._actionmapping[action][filename] = (data, message)
586 587
587 588 def getfile(self, filename, default_return=None):
588 589 """returns (action, args, msg) about this file
589 590
590 591 returns default_return if the file is not present"""
591 592 if filename in self._filemapping:
592 593 return self._filemapping[filename]
593 594 return default_return
594 595
595 596 def files(self, actions=None):
596 597 """returns files on which provided action needs to perfromed
597 598
598 599 If actions is None, all files are returned
599 600 """
600 601 # TODO: think whether we should return renamedelete and
601 602 # diverge filenames also
602 603 if actions is None:
603 604 for f in self._filemapping:
604 605 yield f
605 606
606 607 else:
607 608 for a in actions:
608 609 for f in self._actionmapping[a]:
609 610 yield f
610 611
611 612 def removefile(self, filename):
612 613 """removes a file from the mergeresult object as the file might
613 614 not be merged anymore"""
614 615 action, data, message = self._filemapping[filename]
615 616 del self._filemapping[filename]
616 617 del self._actionmapping[action][filename]
617 618
618 619 def getactions(self, actions, sort=False):
619 620 """get list of files which are marked with these actions
620 621 if sort is true, files for each action are sorted and then added
621 622
622 623 Returns a list of tuple of form (filename, data, message)
623 624 """
624 625 for a in actions:
625 626 if sort:
626 627 for f in sorted(self._actionmapping[a]):
627 628 args, msg = self._actionmapping[a][f]
628 629 yield f, args, msg
629 630 else:
630 631 for f, (args, msg) in self._actionmapping[a].items():
631 632 yield f, args, msg
632 633
633 634 def len(self, actions=None):
634 635 """returns number of files which needs actions
635 636
636 637 if actions is passed, the total number of files in those actions
637 638 only is returned"""
638 639
639 640 if actions is None:
640 641 return len(self._filemapping)
641 642
642 643 return sum(len(self._actionmapping[a]) for a in actions)
643 644
644 645 def filemap(self, sort=False):
645 646 if sort:
646 647 for key, val in sorted(self._filemapping.items()):
647 648 yield key, val
648 649 else:
649 650 for key, val in self._filemapping.items():
650 651 yield key, val
651 652
652 653 def addcommitinfo(self, filename, key, value):
653 654 """adds key-value information about filename which will be required
654 655 while committing this merge"""
655 656 self._commitinfo[filename][key] = value
656 657
657 658 @property
658 659 def diverge(self):
659 660 return self._diverge
660 661
661 662 @property
662 663 def renamedelete(self):
663 664 return self._renamedelete
664 665
665 666 @property
666 667 def commitinfo(self):
667 668 return self._commitinfo
668 669
669 670 @property
670 671 def actionsdict(self):
671 672 """returns a dictionary of actions to be perfomed with action as key
672 673 and a list of files and related arguments as values"""
673 674 res = collections.defaultdict(list)
674 675 for a, d in self._actionmapping.items():
675 676 for f, (args, msg) in d.items():
676 677 res[a].append((f, args, msg))
677 678 return res
678 679
679 680 def setactions(self, actions):
680 681 self._filemapping = actions
681 682 self._actionmapping = collections.defaultdict(dict)
682 683 for f, (act, data, msg) in self._filemapping.items():
683 684 self._actionmapping[act][f] = data, msg
684 685
685 686 def hasconflicts(self):
686 687 """tells whether this merge resulted in some actions which can
687 688 result in conflicts or not"""
688 689 for a in self._actionmapping.keys():
689 690 if (
690 691 a
691 692 not in (
692 693 mergestatemod.ACTION_GET,
693 694 mergestatemod.ACTION_EXEC,
694 695 mergestatemod.ACTION_REMOVE,
695 696 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
696 697 )
697 698 and self._actionmapping[a]
698 699 and not a.no_op
699 700 ):
700 701 return True
701 702
702 703 return False
703 704
704 705
705 706 def manifestmerge(
706 707 repo,
707 708 wctx,
708 709 p2,
709 710 pa,
710 711 branchmerge,
711 712 force,
712 713 matcher,
713 714 acceptremote,
714 715 followcopies,
715 716 forcefulldiff=False,
716 717 ):
717 718 """
718 719 Merge wctx and p2 with ancestor pa and generate merge action list
719 720
720 721 branchmerge and force are as passed in to update
721 722 matcher = matcher to filter file lists
722 723 acceptremote = accept the incoming changes without prompting
723 724
724 725 Returns an object of mergeresult class
725 726 """
726 727 mresult = mergeresult()
727 728 if matcher is not None and matcher.always():
728 729 matcher = None
729 730
730 731 # manifests fetched in order are going to be faster, so prime the caches
731 732 [
732 733 x.manifest()
733 734 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
734 735 ]
735 736
736 737 branch_copies1 = copies.branch_copies()
737 738 branch_copies2 = copies.branch_copies()
738 739 diverge = {}
739 740 # information from merge which is needed at commit time
740 741 # for example choosing filelog of which parent to commit
741 742 # TODO: use specific constants in future for this mapping
742 743 if followcopies:
743 744 branch_copies1, branch_copies2, diverge = copies.mergecopies(
744 745 repo, wctx, p2, pa
745 746 )
746 747
747 748 boolbm = pycompat.bytestr(bool(branchmerge))
748 749 boolf = pycompat.bytestr(bool(force))
749 750 boolm = pycompat.bytestr(bool(matcher))
750 751 repo.ui.note(_(b"resolving manifests\n"))
751 752 repo.ui.debug(
752 753 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
753 754 )
754 755 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
755 756
756 757 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
757 758 copied1 = set(branch_copies1.copy.values())
758 759 copied1.update(branch_copies1.movewithdir.values())
759 760 copied2 = set(branch_copies2.copy.values())
760 761 copied2.update(branch_copies2.movewithdir.values())
761 762
762 763 if b'.hgsubstate' in m1 and wctx.rev() is None:
763 764 # Check whether sub state is modified, and overwrite the manifest
764 765 # to flag the change. If wctx is a committed revision, we shouldn't
765 766 # care for the dirty state of the working directory.
766 767 if any(wctx.sub(s).dirty() for s in wctx.substate):
767 768 m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
768 769
769 770 # Don't use m2-vs-ma optimization if:
770 771 # - ma is the same as m1 or m2, which we're just going to diff again later
771 772 # - The caller specifically asks for a full diff, which is useful during bid
772 773 # merge.
773 774 # - we are tracking salvaged files specifically hence should process all
774 775 # files
775 776 if (
776 777 pa not in ([wctx, p2] + wctx.parents())
777 778 and not forcefulldiff
778 779 and not (
779 780 repo.ui.configbool(b'experimental', b'merge-track-salvaged')
780 781 or repo.filecopiesmode == b'changeset-sidedata'
781 782 )
782 783 ):
783 784 # Identify which files are relevant to the merge, so we can limit the
784 785 # total m1-vs-m2 diff to just those files. This has significant
785 786 # performance benefits in large repositories.
786 787 relevantfiles = set(ma.diff(m2).keys())
787 788
788 789 # For copied and moved files, we need to add the source file too.
789 790 for copykey, copyvalue in branch_copies1.copy.items():
790 791 if copyvalue in relevantfiles:
791 792 relevantfiles.add(copykey)
792 793 for movedirkey in branch_copies1.movewithdir:
793 794 relevantfiles.add(movedirkey)
794 795 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
795 796 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
796 797
797 798 diff = m1.diff(m2, match=matcher)
798 799
799 800 for f, ((n1, fl1), (n2, fl2)) in diff.items():
800 801 if n1 and n2: # file exists on both local and remote side
801 802 if f not in ma:
802 803 # TODO: what if they're renamed from different sources?
803 804 fa = branch_copies1.copy.get(
804 805 f, None
805 806 ) or branch_copies2.copy.get(f, None)
806 807 args, msg = None, None
807 808 if fa is not None:
808 809 args = (f, f, fa, False, pa.node())
809 810 msg = b'both renamed from %s' % fa
810 811 else:
811 812 args = (f, f, None, False, pa.node())
812 813 msg = b'both created'
813 814 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
814 815 elif f in branch_copies1.copy:
815 816 fa = branch_copies1.copy[f]
816 817 mresult.addfile(
817 818 f,
818 819 mergestatemod.ACTION_MERGE,
819 820 (f, fa, fa, False, pa.node()),
820 821 b'local replaced from %s' % fa,
821 822 )
822 823 elif f in branch_copies2.copy:
823 824 fa = branch_copies2.copy[f]
824 825 mresult.addfile(
825 826 f,
826 827 mergestatemod.ACTION_MERGE,
827 828 (fa, f, fa, False, pa.node()),
828 829 b'other replaced from %s' % fa,
829 830 )
830 831 else:
831 832 a = ma[f]
832 833 fla = ma.flags(f)
833 834 nol = b'l' not in fl1 + fl2 + fla
834 835 if n2 == a and fl2 == fla:
835 836 mresult.addfile(
836 837 f,
837 838 mergestatemod.ACTION_KEEP,
838 839 (),
839 840 b'remote unchanged',
840 841 )
841 842 elif n1 == a and fl1 == fla: # local unchanged - use remote
842 843 if n1 == n2: # optimization: keep local content
843 844 mresult.addfile(
844 845 f,
845 846 mergestatemod.ACTION_EXEC,
846 847 (fl2,),
847 848 b'update permissions',
848 849 )
849 850 else:
850 851 mresult.addfile(
851 852 f,
852 853 mergestatemod.ACTION_GET,
853 854 (fl2, False),
854 855 b'remote is newer',
855 856 )
856 857 if branchmerge:
857 858 mresult.addcommitinfo(
858 859 f, b'filenode-source', b'other'
859 860 )
860 861 elif nol and n2 == a: # remote only changed 'x'
861 862 mresult.addfile(
862 863 f,
863 864 mergestatemod.ACTION_EXEC,
864 865 (fl2,),
865 866 b'update permissions',
866 867 )
867 868 elif nol and n1 == a: # local only changed 'x'
868 869 mresult.addfile(
869 870 f,
870 871 mergestatemod.ACTION_GET,
871 872 (fl1, False),
872 873 b'remote is newer',
873 874 )
874 875 if branchmerge:
875 876 mresult.addcommitinfo(f, b'filenode-source', b'other')
876 877 else: # both changed something
877 878 mresult.addfile(
878 879 f,
879 880 mergestatemod.ACTION_MERGE,
880 881 (f, f, f, False, pa.node()),
881 882 b'versions differ',
882 883 )
883 884 elif n1: # file exists only on local side
884 885 if f in copied2:
885 886 pass # we'll deal with it on m2 side
886 887 elif (
887 888 f in branch_copies1.movewithdir
888 889 ): # directory rename, move local
889 890 f2 = branch_copies1.movewithdir[f]
890 891 if f2 in m2:
891 892 mresult.addfile(
892 893 f2,
893 894 mergestatemod.ACTION_MERGE,
894 895 (f, f2, None, True, pa.node()),
895 896 b'remote directory rename, both created',
896 897 )
897 898 else:
898 899 mresult.addfile(
899 900 f2,
900 901 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
901 902 (f, fl1),
902 903 b'remote directory rename - move from %s' % f,
903 904 )
904 905 elif f in branch_copies1.copy:
905 906 f2 = branch_copies1.copy[f]
906 907 mresult.addfile(
907 908 f,
908 909 mergestatemod.ACTION_MERGE,
909 910 (f, f2, f2, False, pa.node()),
910 911 b'local copied/moved from %s' % f2,
911 912 )
912 913 elif f in ma: # clean, a different, no remote
913 914 if n1 != ma[f]:
914 915 if acceptremote:
915 916 mresult.addfile(
916 917 f,
917 918 mergestatemod.ACTION_REMOVE,
918 919 None,
919 920 b'remote delete',
920 921 )
921 922 else:
922 923 mresult.addfile(
923 924 f,
924 925 mergestatemod.ACTION_CHANGED_DELETED,
925 926 (f, None, f, False, pa.node()),
926 927 b'prompt changed/deleted',
927 928 )
928 929 if branchmerge:
929 930 mresult.addcommitinfo(
930 931 f, b'merge-removal-candidate', b'yes'
931 932 )
932 933 elif n1 == repo.nodeconstants.addednodeid:
933 934 # This file was locally added. We should forget it instead of
934 935 # deleting it.
935 936 mresult.addfile(
936 937 f,
937 938 mergestatemod.ACTION_FORGET,
938 939 None,
939 940 b'remote deleted',
940 941 )
941 942 else:
942 943 mresult.addfile(
943 944 f,
944 945 mergestatemod.ACTION_REMOVE,
945 946 None,
946 947 b'other deleted',
947 948 )
948 949 if branchmerge:
949 950 # the file must be absent after merging,
950 951 # however the user might make
951 952 # the file reappear using revert and if they do,
952 953 # we force create a new node
953 954 mresult.addcommitinfo(
954 955 f, b'merge-removal-candidate', b'yes'
955 956 )
956 957
957 958 else: # file not in ancestor, not in remote
958 959 mresult.addfile(
959 960 f,
960 961 mergestatemod.ACTION_KEEP_NEW,
961 962 None,
962 963 b'ancestor missing, remote missing',
963 964 )
964 965
965 966 elif n2: # file exists only on remote side
966 967 if f in copied1:
967 968 pass # we'll deal with it on m1 side
968 969 elif f in branch_copies2.movewithdir:
969 970 f2 = branch_copies2.movewithdir[f]
970 971 if f2 in m1:
971 972 mresult.addfile(
972 973 f2,
973 974 mergestatemod.ACTION_MERGE,
974 975 (f2, f, None, False, pa.node()),
975 976 b'local directory rename, both created',
976 977 )
977 978 else:
978 979 mresult.addfile(
979 980 f2,
980 981 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
981 982 (f, fl2),
982 983 b'local directory rename - get from %s' % f,
983 984 )
984 985 elif f in branch_copies2.copy:
985 986 f2 = branch_copies2.copy[f]
986 987 msg, args = None, None
987 988 if f2 in m2:
988 989 args = (f2, f, f2, False, pa.node())
989 990 msg = b'remote copied from %s' % f2
990 991 else:
991 992 args = (f2, f, f2, True, pa.node())
992 993 msg = b'remote moved from %s' % f2
993 994 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
994 995 elif f not in ma:
995 996 # local unknown, remote created: the logic is described by the
996 997 # following table:
997 998 #
998 999 # force branchmerge different | action
999 1000 # n * * | create
1000 1001 # y n * | create
1001 1002 # y y n | create
1002 1003 # y y y | merge
1003 1004 #
1004 1005 # Checking whether the files are different is expensive, so we
1005 1006 # don't do that when we can avoid it.
1006 1007 if not force:
1007 1008 mresult.addfile(
1008 1009 f,
1009 1010 mergestatemod.ACTION_CREATED,
1010 1011 (fl2,),
1011 1012 b'remote created',
1012 1013 )
1013 1014 elif not branchmerge:
1014 1015 mresult.addfile(
1015 1016 f,
1016 1017 mergestatemod.ACTION_CREATED,
1017 1018 (fl2,),
1018 1019 b'remote created',
1019 1020 )
1020 1021 else:
1021 1022 mresult.addfile(
1022 1023 f,
1023 1024 mergestatemod.ACTION_CREATED_MERGE,
1024 1025 (fl2, pa.node()),
1025 1026 b'remote created, get or merge',
1026 1027 )
1027 1028 elif n2 != ma[f]:
1028 1029 df = None
1029 1030 for d in branch_copies1.dirmove:
1030 1031 if f.startswith(d):
1031 1032 # new file added in a directory that was moved
1032 1033 df = branch_copies1.dirmove[d] + f[len(d) :]
1033 1034 break
1034 1035 if df is not None and df in m1:
1035 1036 mresult.addfile(
1036 1037 df,
1037 1038 mergestatemod.ACTION_MERGE,
1038 1039 (df, f, f, False, pa.node()),
1039 1040 b'local directory rename - respect move '
1040 1041 b'from %s' % f,
1041 1042 )
1042 1043 elif acceptremote:
1043 1044 mresult.addfile(
1044 1045 f,
1045 1046 mergestatemod.ACTION_CREATED,
1046 1047 (fl2,),
1047 1048 b'remote recreating',
1048 1049 )
1049 1050 else:
1050 1051 mresult.addfile(
1051 1052 f,
1052 1053 mergestatemod.ACTION_DELETED_CHANGED,
1053 1054 (None, f, f, False, pa.node()),
1054 1055 b'prompt deleted/changed',
1055 1056 )
1056 1057 if branchmerge:
1057 1058 mresult.addcommitinfo(
1058 1059 f, b'merge-removal-candidate', b'yes'
1059 1060 )
1060 1061 else:
1061 1062 mresult.addfile(
1062 1063 f,
1063 1064 mergestatemod.ACTION_KEEP_ABSENT,
1064 1065 None,
1065 1066 b'local not present, remote unchanged',
1066 1067 )
1067 1068 if branchmerge:
1068 1069 # the file must be absent after merging
1069 1070 # however the user might make
1070 1071 # the file reappear using revert and if they do,
1071 1072 # we force create a new node
1072 1073 mresult.addcommitinfo(f, b'merge-removal-candidate', b'yes')
1073 1074
1074 1075 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1075 1076 # If we are merging, look for path conflicts.
1076 1077 checkpathconflicts(repo, wctx, p2, mresult)
1077 1078
1078 1079 narrowmatch = repo.narrowmatch()
1079 1080 if not narrowmatch.always():
1080 1081 # Updates "actions" in place
1081 1082 _filternarrowactions(narrowmatch, branchmerge, mresult)
1082 1083
1083 1084 renamedelete = branch_copies1.renamedelete
1084 1085 renamedelete.update(branch_copies2.renamedelete)
1085 1086
1086 1087 mresult.updatevalues(diverge, renamedelete)
1087 1088 return mresult
1088 1089
1089 1090
1090 1091 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1091 1092 """Resolves false conflicts where the nodeid changed but the content
1092 1093 remained the same."""
1093 1094 # We force a copy of actions.items() because we're going to mutate
1094 1095 # actions as we resolve trivial conflicts.
1095 1096 for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
1096 1097 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1097 1098 # local did change but ended up with same content
1098 1099 mresult.addfile(
1099 1100 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1100 1101 )
1101 1102
1102 1103 for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
1103 1104 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1104 1105 # remote did change but ended up with same content
1105 1106 mresult.removefile(f) # don't get = keep local deleted
1106 1107
1107 1108
1108 1109 def calculateupdates(
1109 1110 repo,
1110 1111 wctx,
1111 1112 mctx,
1112 1113 ancestors,
1113 1114 branchmerge,
1114 1115 force,
1115 1116 acceptremote,
1116 1117 followcopies,
1117 1118 matcher=None,
1118 1119 mergeforce=False,
1119 1120 ):
1120 1121 """
1121 1122 Calculate the actions needed to merge mctx into wctx using ancestors
1122 1123
1123 1124 Uses manifestmerge() to merge manifest and get list of actions required to
1124 1125 perform for merging two manifests. If there are multiple ancestors, uses bid
1125 1126 merge if enabled.
1126 1127
1127 1128 Also filters out actions which are unrequired if repository is sparse.
1128 1129
1129 1130 Returns mergeresult object same as manifestmerge().
1130 1131 """
1131 1132 # Avoid cycle.
1132 1133 from . import sparse
1133 1134
1134 1135 mresult = None
1135 1136 if len(ancestors) == 1: # default
1136 1137 mresult = manifestmerge(
1137 1138 repo,
1138 1139 wctx,
1139 1140 mctx,
1140 1141 ancestors[0],
1141 1142 branchmerge,
1142 1143 force,
1143 1144 matcher,
1144 1145 acceptremote,
1145 1146 followcopies,
1146 1147 )
1147 1148 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1148 1149 if repo.ui.configbool(b'devel', b'debug.abort-update'):
1149 1150 exit(1)
1150 1151
1151 1152 else: # only when merge.preferancestor=* - the default
1152 1153 repo.ui.note(
1153 1154 _(b"note: merging %s and %s using bids from ancestors %s\n")
1154 1155 % (
1155 1156 wctx,
1156 1157 mctx,
1157 1158 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1158 1159 )
1159 1160 )
1160 1161
1161 1162 # mapping filename to bids (action method to list of actions)
1162 1163 # {FILENAME1 : BID1, FILENAME2 : BID2}
1163 1164 # BID is another dictionary which contains
1164 1165 # mapping of following form:
1165 1166 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1166 1167 fbids = {}
1167 1168 mresult = mergeresult()
1168 1169 diverge, renamedelete = None, None
1169 1170 for ancestor in ancestors:
1170 1171 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1171 1172 mresult1 = manifestmerge(
1172 1173 repo,
1173 1174 wctx,
1174 1175 mctx,
1175 1176 ancestor,
1176 1177 branchmerge,
1177 1178 force,
1178 1179 matcher,
1179 1180 acceptremote,
1180 1181 followcopies,
1181 1182 forcefulldiff=True,
1182 1183 )
1183 1184 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1184 1185
1185 1186 # Track the shortest set of warnings on the theory that bid
1186 1187 # merge will correctly incorporate more information
1187 1188 if diverge is None or len(mresult1.diverge) < len(diverge):
1188 1189 diverge = mresult1.diverge
1189 1190 if renamedelete is None or len(renamedelete) < len(
1190 1191 mresult1.renamedelete
1191 1192 ):
1192 1193 renamedelete = mresult1.renamedelete
1193 1194
1194 1195 # blindly update final mergeresult commitinfo with what we get
1195 1196 # from mergeresult object for each ancestor
1196 1197 # TODO: some commitinfo depends on what bid merge chooses and hence
1197 1198 # we will need to make commitinfo also depend on bid merge logic
1198 1199 mresult._commitinfo.update(mresult1._commitinfo)
1199 1200
1200 1201 for f, a in mresult1.filemap(sort=True):
1201 1202 m, args, msg = a
1202 1203 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m.__bytes__()))
1203 1204 if f in fbids:
1204 1205 d = fbids[f]
1205 1206 if m in d:
1206 1207 d[m].append(a)
1207 1208 else:
1208 1209 d[m] = [a]
1209 1210 else:
1210 1211 fbids[f] = {m: [a]}
1211 1212
1212 1213 # Call for bids
1213 1214 # Pick the best bid for each file
1214 1215 repo.ui.note(
1215 1216 _(b'\nauction for merging merge bids (%d ancestors)\n')
1216 1217 % len(ancestors)
1217 1218 )
1218 1219 for f, bids in sorted(fbids.items()):
1219 1220 if repo.ui.debugflag:
1220 1221 repo.ui.debug(b" list of bids for %s:\n" % f)
1221 1222 for m, l in sorted(bids.items()):
1222 1223 for _f, args, msg in l:
1223 1224 repo.ui.debug(b' %s -> %s\n' % (msg, m.__bytes__()))
1224 1225 # bids is a mapping from action method to list of actions
1225 1226 # Consensus?
1226 1227 if len(bids) == 1: # all bids are the same kind of method
1227 1228 m, l = list(bids.items())[0]
1228 1229 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1229 1230 repo.ui.note(
1230 1231 _(b" %s: consensus for %s\n") % (f, m.__bytes__())
1231 1232 )
1232 1233 mresult.addfile(f, *l[0])
1233 1234 continue
1234 1235 # If keep is an option, just do it.
1235 1236 if mergestatemod.ACTION_KEEP in bids:
1236 1237 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1237 1238 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1238 1239 continue
1239 1240 # If keep absent is an option, just do that
1240 1241 if mergestatemod.ACTION_KEEP_ABSENT in bids:
1241 1242 repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
1242 1243 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
1243 1244 continue
1244 1245 # ACTION_KEEP_NEW and ACTION_CHANGED_DELETED are conflicting actions
1245 1246 # as one says that the file is new while the other says that the file
1246 1247 # was present earlier too and has a change/delete conflict
1247 1248 # Let's fall back to conflicting ACTION_CHANGED_DELETED and let user
1248 1249 # do the right thing
1249 1250 if (
1250 1251 mergestatemod.ACTION_CHANGED_DELETED in bids
1251 1252 and mergestatemod.ACTION_KEEP_NEW in bids
1252 1253 ):
1253 1254 repo.ui.note(_(b" %s: picking 'changed/deleted' action\n") % f)
1254 1255 mresult.addfile(
1255 1256 f, *bids[mergestatemod.ACTION_CHANGED_DELETED][0]
1256 1257 )
1257 1258 continue
1258 1259 # If keep new is an option, let's just do that
1259 1260 if mergestatemod.ACTION_KEEP_NEW in bids:
1260 1261 repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
1261 1262 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
1262 1263 continue
1263 1264 # ACTION_GET and ACTION_DELETE_CHANGED are conflicting actions as
1264 1265 # one action states the file is newer/created on the remote side and
1265 1266 # the other states that the file is deleted locally and changed on the
1266 1267 # remote side. Let's fall back and rely on a conflicting action to let
1267 1268 # the user do the right thing
1268 1269 if (
1269 1270 mergestatemod.ACTION_DELETED_CHANGED in bids
1270 1271 and mergestatemod.ACTION_GET in bids
1271 1272 ):
1272 1273 repo.ui.note(_(b" %s: picking 'delete/changed' action\n") % f)
1273 1274 mresult.addfile(
1274 1275 f, *bids[mergestatemod.ACTION_DELETED_CHANGED][0]
1275 1276 )
1276 1277 continue
1277 1278 # If there are gets and they all agree [how could they not?], do it.
1278 1279 if mergestatemod.ACTION_GET in bids:
1279 1280 ga0 = bids[mergestatemod.ACTION_GET][0]
1280 1281 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1281 1282 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1282 1283 mresult.addfile(f, *ga0)
1283 1284 continue
1284 1285 # TODO: Consider other simple actions such as mode changes
1285 1286 # Handle inefficient democrazy.
1286 1287 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1287 1288 for m, l in sorted(bids.items()):
1288 1289 for _f, args, msg in l:
1289 1290 repo.ui.note(b' %s -> %s\n' % (msg, m.__bytes__()))
1290 1291 # Pick random action. TODO: Instead, prompt user when resolving
1291 1292 m, l = list(bids.items())[0]
1292 1293 repo.ui.warn(
1293 1294 _(b' %s: ambiguous merge - picked %s action\n')
1294 1295 % (f, m.__bytes__())
1295 1296 )
1296 1297 mresult.addfile(f, *l[0])
1297 1298 continue
1298 1299 repo.ui.note(_(b'end of auction\n\n'))
1299 1300 mresult.updatevalues(diverge, renamedelete)
1300 1301
1301 1302 if wctx.rev() is None:
1302 1303 _forgetremoved(wctx, mctx, branchmerge, mresult)
1303 1304
1304 1305 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1305 1306 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1306 1307
1307 1308 return mresult
1308 1309
1309 1310
1310 1311 def _getcwd():
1311 1312 try:
1312 1313 return encoding.getcwd()
1313 1314 except FileNotFoundError:
1314 1315 return None
1315 1316
1316 1317
1317 1318 def batchremove(repo, wctx, actions):
1318 1319 """apply removes to the working directory
1319 1320
1320 1321 yields tuples for progress updates
1321 1322 """
1322 1323 verbose = repo.ui.verbose
1323 1324 cwd = _getcwd()
1324 1325 i = 0
1325 1326 for f, args, msg in actions:
1326 1327 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1327 1328 if verbose:
1328 1329 repo.ui.note(_(b"removing %s\n") % f)
1329 1330 wctx[f].audit()
1330 1331 try:
1331 1332 wctx[f].remove(ignoremissing=True)
1332 1333 except OSError as inst:
1333 1334 repo.ui.warn(
1334 1335 _(b"update failed to remove %s: %s!\n")
1335 1336 % (f, stringutil.forcebytestr(inst.strerror))
1336 1337 )
1337 1338 if i == 100:
1338 1339 yield i, f
1339 1340 i = 0
1340 1341 i += 1
1341 1342 if i > 0:
1342 1343 yield i, f
1343 1344
1344 1345 if cwd and not _getcwd():
1345 1346 # cwd was removed in the course of removing files; print a helpful
1346 1347 # warning.
1347 1348 repo.ui.warn(
1348 1349 _(
1349 1350 b"current directory was removed\n"
1350 1351 b"(consider changing to repo root: %s)\n"
1351 1352 )
1352 1353 % repo.root
1353 1354 )
1354 1355
1355 1356
1356 1357 def batchget(repo, mctx, wctx, wantfiledata, actions):
1357 1358 """apply gets to the working directory
1358 1359
1359 1360 mctx is the context to get from
1360 1361
1361 1362 Yields arbitrarily many (False, tuple) for progress updates, followed by
1362 1363 exactly one (True, filedata). When wantfiledata is false, filedata is an
1363 1364 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1364 1365 mtime) of the file f written for each action.
1365 1366 """
1366 1367 filedata = {}
1367 1368 verbose = repo.ui.verbose
1368 1369 fctx = mctx.filectx
1369 1370 ui = repo.ui
1370 1371 i = 0
1371 1372 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1372 1373 for f, (flags, backup), msg in actions:
1373 1374 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1374 1375 if verbose:
1375 1376 repo.ui.note(_(b"getting %s\n") % f)
1376 1377
1377 1378 if backup:
1378 1379 # If a file or directory exists with the same name, back that
1379 1380 # up. Otherwise, look to see if there is a file that conflicts
1380 1381 # with a directory this file is in, and if so, back that up.
1381 1382 conflicting = f
1382 1383 if not repo.wvfs.lexists(f):
1383 1384 for p in pathutil.finddirs(f):
1384 1385 if repo.wvfs.isfileorlink(p):
1385 1386 conflicting = p
1386 1387 break
1387 1388 if repo.wvfs.lexists(conflicting):
1388 1389 orig = scmutil.backuppath(ui, repo, conflicting)
1389 1390 util.rename(repo.wjoin(conflicting), orig)
1390 1391 wfctx = wctx[f]
1391 1392 wfctx.clearunknown()
1392 1393 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1393 1394 size = wfctx.write(
1394 1395 fctx(f).data(),
1395 1396 flags,
1396 1397 backgroundclose=True,
1397 1398 atomictemp=atomictemp,
1398 1399 )
1399 1400 if wantfiledata:
1400 1401 # XXX note that there is a race window between the time we
1401 1402 # write the clean data into the file and the time we stat it. So another
1402 1403 # writing process meddling with the file content right after we
1403 1404 # wrote it could cause bad stat data to be gathered.
1404 1405 #
1405 1406 # There are 2 pieces of data we gather here
1406 1407 # - the mode:
1407 1408 # That we actually just wrote, we should not need to read
1408 1409 # it from disk (except that not all modes might have survived
1409 1410 # the disk round-trip, which is another issue: we should
1410 1411 # not depend on this)
1411 1412 # - the mtime,
1412 1413 # On systems that support nanosecond precision, the mtime
1413 1414 # could be accurate enough to tell the two writes apart.
1414 1415 # However, gathering it in a racy way makes the mtime we
1415 1416 # gather "unreliable".
1416 1417 #
1417 1418 # (note: we get the size from the data we write, which is sane)
1418 1419 #
1419 1420 # So in theory the data returned here are fully racy, but in
1420 1421 # practice "it works mostly fine".
1421 1422 #
1422 1423 # Do not be surprised if you end up reading this while looking
1423 1424 # for the causes of some buggy status. Feel free to improve
1424 1425 # this in the future, but we cannot simply stop gathering
1425 1426 # information. Otherwise `hg status` call made after a large `hg
1426 1427 # update` runs would have to redo a similar amount of work to
1427 1428 # restore and compare all files content.
1428 1429 s = wfctx.lstat()
1429 1430 mode = s.st_mode
1430 1431 mtime = timestamp.mtime_of(s)
1431 1432 # for dirstate.update_file's parentfiledata argument:
1432 1433 filedata[f] = (mode, size, mtime)
1433 1434 if i == 100:
1434 1435 yield False, (i, f)
1435 1436 i = 0
1436 1437 i += 1
1437 1438 if i > 0:
1438 1439 yield False, (i, f)
1439 1440 yield True, filedata
1440 1441
1441 1442
1442 1443 def _prefetchfiles(repo, ctx, mresult):
1443 1444 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1444 1445 of merge actions. ``ctx`` is the context being merged in."""
1445 1446
1446 1447 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1447 1448 # don't touch the context to be merged in. 'cd' is skipped, because
1448 1449 # changed/deleted never resolves to something from the remote side.
1449 1450 files = mresult.files(
1450 1451 [
1451 1452 mergestatemod.ACTION_GET,
1452 1453 mergestatemod.ACTION_DELETED_CHANGED,
1453 1454 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1454 1455 mergestatemod.ACTION_MERGE,
1455 1456 ]
1456 1457 )
1457 1458
1458 1459 prefetch = scmutil.prefetchfiles
1459 1460 matchfiles = scmutil.matchfiles
1460 1461 prefetch(
1461 1462 repo,
1462 1463 [
1463 1464 (
1464 1465 ctx.rev(),
1465 1466 matchfiles(repo, files),
1466 1467 )
1467 1468 ],
1468 1469 )
1469 1470
1470 1471
1471 1472 @attr.s(frozen=True)
1472 1473 class updateresult:
1473 1474 updatedcount = attr.ib()
1474 1475 mergedcount = attr.ib()
1475 1476 removedcount = attr.ib()
1476 1477 unresolvedcount = attr.ib()
1477 1478
1478 1479 def isempty(self):
1479 1480 return not (
1480 1481 self.updatedcount
1481 1482 or self.mergedcount
1482 1483 or self.removedcount
1483 1484 or self.unresolvedcount
1484 1485 )
1485 1486
1486 1487
1487 1488 def applyupdates(
1488 1489 repo,
1489 1490 mresult,
1490 1491 wctx,
1491 1492 mctx,
1492 1493 overwrite,
1493 1494 wantfiledata,
1494 1495 labels=None,
1495 1496 ):
1496 1497 """apply the merge action list to the working directory
1497 1498
1498 1499 mresult is a mergeresult object representing result of the merge
1499 1500 wctx is the working copy context
1500 1501 mctx is the context to be merged into the working copy
1501 1502
1502 1503 Return a tuple of (counts, filedata), where counts is a tuple
1503 1504 (updated, merged, removed, unresolved) that describes how many
1504 1505 files were affected by the update, and filedata is as described in
1505 1506 batchget.
1506 1507 """
1507 1508
1508 1509 _prefetchfiles(repo, mctx, mresult)
1509 1510
1510 1511 updated, merged, removed = 0, 0, 0
1511 1512 ms = wctx.mergestate(clean=True)
1512 1513 ms.start(wctx.p1().node(), mctx.node(), labels)
1513 1514
1514 1515 for f, op in mresult.commitinfo.items():
1515 1516 # the other side of filenode was chosen while merging, store this in
1516 1517 # mergestate so that it can be reused on commit
1517 1518 ms.addcommitinfo(f, op)
1518 1519
1519 1520 num_no_op = mresult.len(mergestatemod.MergeAction.NO_OP_ACTIONS)
1520 1521 numupdates = mresult.len() - num_no_op
1521 1522 progress = repo.ui.makeprogress(
1522 1523 _(b'updating'), unit=_(b'files'), total=numupdates
1523 1524 )
1524 1525
1525 1526 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1526 1527 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1527 1528
1528 1529 # record path conflicts
1529 1530 for f, args, msg in mresult.getactions(
1530 1531 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1531 1532 ):
1532 1533 f1, fo = args
1533 1534 s = repo.ui.status
1534 1535 s(
1535 1536 _(
1536 1537 b"%s: path conflict - a file or link has the same name as a "
1537 1538 b"directory\n"
1538 1539 )
1539 1540 % f
1540 1541 )
1541 1542 if fo == b'l':
1542 1543 s(_(b"the local file has been renamed to %s\n") % f1)
1543 1544 else:
1544 1545 s(_(b"the remote file has been renamed to %s\n") % f1)
1545 1546 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1546 1547 ms.addpathconflict(f, f1, fo)
1547 1548 progress.increment(item=f)
1548 1549
1549 1550 # When merging in-memory, we can't support worker processes, so set the
1550 1551 # per-item cost at 0 in that case.
1551 1552 cost = 0 if wctx.isinmemory() else 0.001
1552 1553
1553 1554 # remove in parallel (must come before resolving path conflicts and getting)
1554 1555 prog = worker.worker(
1555 1556 repo.ui,
1556 1557 cost,
1557 1558 batchremove,
1558 1559 (repo, wctx),
1559 1560 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1560 1561 )
1561 1562 for i, item in prog:
1562 1563 progress.increment(step=i, item=item)
1563 1564 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1564 1565
1565 1566 # resolve path conflicts (must come before getting)
1566 1567 for f, args, msg in mresult.getactions(
1567 1568 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1568 1569 ):
1569 1570 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1570 1571 (f0, origf0) = args
1571 1572 if wctx[f0].lexists():
1572 1573 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1573 1574 wctx[f].audit()
1574 1575 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1575 1576 wctx[f0].remove()
1576 1577 progress.increment(item=f)
1577 1578
1578 1579 # get in parallel.
1579 1580 threadsafe = repo.ui.configbool(
1580 1581 b'experimental', b'worker.wdir-get-thread-safe'
1581 1582 )
1582 1583 prog = worker.worker(
1583 1584 repo.ui,
1584 1585 cost,
1585 1586 batchget,
1586 1587 (repo, mctx, wctx, wantfiledata),
1587 1588 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1588 1589 threadsafe=threadsafe,
1589 1590 hasretval=True,
1590 1591 )
1591 1592 getfiledata = {}
1592 1593 for final, res in prog:
1593 1594 if final:
1594 1595 getfiledata = res
1595 1596 else:
1596 1597 i, item = res
1597 1598 progress.increment(step=i, item=item)
1598 1599
1599 1600 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1600 1601 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1601 1602
1602 1603 # forget (manifest only, just log it) (must come first)
1603 1604 for f, args, msg in mresult.getactions(
1604 1605 (mergestatemod.ACTION_FORGET,), sort=True
1605 1606 ):
1606 1607 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1607 1608 progress.increment(item=f)
1608 1609
1609 1610 # re-add (manifest only, just log it)
1610 1611 for f, args, msg in mresult.getactions(
1611 1612 (mergestatemod.ACTION_ADD,), sort=True
1612 1613 ):
1613 1614 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1614 1615 progress.increment(item=f)
1615 1616
1616 1617 # re-add/mark as modified (manifest only, just log it)
1617 1618 for f, args, msg in mresult.getactions(
1618 1619 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1619 1620 ):
1620 1621 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1621 1622 progress.increment(item=f)
1622 1623
1623 1624 # keep (noop, just log it)
1624 1625 for a in mergestatemod.MergeAction.NO_OP_ACTIONS:
1625 1626 for f, args, msg in mresult.getactions((a,), sort=True):
1626 1627 repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a.__bytes__()))
1627 1628 # no progress
1628 1629
1629 1630 # directory rename, move local
1630 1631 for f, args, msg in mresult.getactions(
1631 1632 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1632 1633 ):
1633 1634 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1634 1635 progress.increment(item=f)
1635 1636 f0, flags = args
1636 1637 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1637 1638 wctx[f].audit()
1638 1639 wctx[f].write(wctx.filectx(f0).data(), flags)
1639 1640 wctx[f0].remove()
1640 1641
1641 1642 # local directory rename, get
1642 1643 for f, args, msg in mresult.getactions(
1643 1644 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1644 1645 ):
1645 1646 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1646 1647 progress.increment(item=f)
1647 1648 f0, flags = args
1648 1649 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1649 1650 wctx[f].write(mctx.filectx(f0).data(), flags)
1650 1651
1651 1652 # exec
1652 1653 for f, args, msg in mresult.getactions(
1653 1654 (mergestatemod.ACTION_EXEC,), sort=True
1654 1655 ):
1655 1656 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1656 1657 progress.increment(item=f)
1657 1658 (flags,) = args
1658 1659 wctx[f].audit()
1659 1660 wctx[f].setflags(b'l' in flags, b'x' in flags)
1660 1661
1661 1662 moves = []
1662 1663
1663 1664 # 'cd' and 'dc' actions are treated like other merge conflicts
1664 1665 mergeactions = list(
1665 1666 mresult.getactions(
1666 1667 [
1667 1668 mergestatemod.ACTION_CHANGED_DELETED,
1668 1669 mergestatemod.ACTION_DELETED_CHANGED,
1669 1670 mergestatemod.ACTION_MERGE,
1670 1671 ],
1671 1672 sort=True,
1672 1673 )
1673 1674 )
1674 1675 for f, args, msg in mergeactions:
1675 1676 f1, f2, fa, move, anc = args
1676 1677 if f == b'.hgsubstate': # merged internally
1677 1678 continue
1678 1679 if f1 is None:
1679 1680 fcl = filemerge.absentfilectx(wctx, fa)
1680 1681 else:
1681 1682 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1682 1683 fcl = wctx[f1]
1683 1684 if f2 is None:
1684 1685 fco = filemerge.absentfilectx(mctx, fa)
1685 1686 else:
1686 1687 fco = mctx[f2]
1687 1688 actx = repo[anc]
1688 1689 if fa in actx:
1689 1690 fca = actx[fa]
1690 1691 else:
1691 1692 # TODO: move to absentfilectx
1692 1693 fca = repo.filectx(f1, fileid=nullrev)
1693 1694 ms.add(fcl, fco, fca, f)
1694 1695 if f1 != f and move:
1695 1696 moves.append(f1)
1696 1697
1697 1698 # remove renamed files after safely stored
1698 1699 for f in moves:
1699 1700 if wctx[f].lexists():
1700 1701 repo.ui.debug(b"removing %s\n" % f)
1701 1702 wctx[f].audit()
1702 1703 wctx[f].remove()
1703 1704
1704 1705 # these actions update the file
1705 1706 updated = mresult.len(
1706 1707 (
1707 1708 mergestatemod.ACTION_GET,
1708 1709 mergestatemod.ACTION_EXEC,
1709 1710 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1710 1711 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1711 1712 )
1712 1713 )
1713 1714
1714 1715 try:
1715 1716 for f, args, msg in mergeactions:
1716 1717 repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
1717 1718 ms.addcommitinfo(f, {b'merged': b'yes'})
1718 1719 progress.increment(item=f)
1719 1720 if f == b'.hgsubstate': # subrepo states need updating
1720 1721 subrepoutil.submerge(
1721 1722 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1722 1723 )
1723 1724 continue
1724 1725 wctx[f].audit()
1725 1726 ms.resolve(f, wctx)
1726 1727
1727 1728 except error.InterventionRequired:
1728 1729 # If the user has merge.on-failure=halt, catch the error and close the
1729 1730 # merge state "properly".
1730 1731 pass
1731 1732 finally:
1732 1733 ms.commit()
1733 1734
1734 1735 unresolved = ms.unresolvedcount()
1735 1736
1736 1737 msupdated, msmerged, msremoved = ms.counts()
1737 1738 updated += msupdated
1738 1739 merged += msmerged
1739 1740 removed += msremoved
1740 1741
1741 1742 extraactions = ms.actions()
1742 1743
1743 1744 progress.complete()
1744 1745 return (
1745 1746 updateresult(updated, merged, removed, unresolved),
1746 1747 getfiledata,
1747 1748 extraactions,
1748 1749 )
1749 1750
1750 1751
1751 1752 def _advertisefsmonitor(repo, num_gets, p1node):
1752 1753 # Advertise fsmonitor when its presence could be useful.
1753 1754 #
1754 1755 # We only advertise when performing an update from an empty working
1755 1756 # directory. This typically only occurs during initial clone.
1756 1757 #
1757 1758 # We give users a mechanism to disable the warning in case it is
1758 1759 # annoying.
1759 1760 #
1760 1761 # We only allow on Linux and MacOS because that's where fsmonitor is
1761 1762 # considered stable.
1762 1763 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1763 1764 fsmonitorthreshold = repo.ui.configint(
1764 1765 b'fsmonitor', b'warn_update_file_count'
1765 1766 )
1766 1767 # avoid cycle dirstate -> sparse -> merge -> dirstate
1767 1768 dirstate_rustmod = policy.importrust("dirstate")
1768 1769
1769 1770 if dirstate_rustmod is not None:
1770 1771 # When using rust status, fsmonitor becomes necessary at higher sizes
1771 1772 fsmonitorthreshold = repo.ui.configint(
1772 1773 b'fsmonitor',
1773 1774 b'warn_update_file_count_rust',
1774 1775 )
1775 1776
1776 1777 try:
1777 1778 # avoid cycle: extensions -> cmdutil -> merge
1778 1779 from . import extensions
1779 1780
1780 1781 extensions.find(b'fsmonitor')
1781 1782 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1782 1783 # We intentionally don't look at whether fsmonitor has disabled
1783 1784 # itself because a) fsmonitor may have already printed a warning
1784 1785 # b) we only care about the config state here.
1785 1786 except KeyError:
1786 1787 fsmonitorenabled = False
1787 1788
1788 1789 if (
1789 1790 fsmonitorwarning
1790 1791 and not fsmonitorenabled
1791 1792 and p1node == repo.nullid
1792 1793 and num_gets >= fsmonitorthreshold
1793 1794 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1794 1795 ):
1795 1796 repo.ui.warn(
1796 1797 _(
1797 1798 b'(warning: large working directory being used without '
1798 1799 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1799 1800 b'see "hg help -e fsmonitor")\n'
1800 1801 )
1801 1802 )
1802 1803
1803 1804
1804 1805 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1805 1806 UPDATECHECK_NONE = b'none'
1806 1807 UPDATECHECK_LINEAR = b'linear'
1807 1808 UPDATECHECK_NO_CONFLICT = b'noconflict'
1808 1809
1809 1810
1810 1811 def _update(
1811 1812 repo,
1812 1813 node,
1813 1814 branchmerge,
1814 1815 force,
1815 1816 ancestor=None,
1816 1817 mergeancestor=False,
1817 1818 labels=None,
1818 1819 matcher=None,
1819 1820 mergeforce=False,
1820 1821 updatedirstate=True,
1821 1822 updatecheck=None,
1822 1823 wc=None,
1823 1824 ):
1824 1825 """
1825 1826 Perform a merge between the working directory and the given node
1826 1827
1827 1828 node = the node to update to
1828 1829 branchmerge = whether to merge between branches
1829 1830 force = whether to force branch merging or file overwriting
1830 1831 matcher = a matcher to filter file lists (dirstate not updated)
1831 1832 mergeancestor = whether it is merging with an ancestor. If true,
1832 1833 we should accept the incoming changes for any prompts that occur.
1833 1834 If false, merging with an ancestor (fast-forward) is only allowed
1834 1835 between different named branches. This flag is used by the rebase extension
1835 1836 as a temporary fix and should be avoided in general.
1836 1837 labels = labels to use for local, other, and base
1837 1838 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1838 1839 this is True, then 'force' should be True as well.
1839 1840
1840 1841 The table below shows all the behaviors of the update command given the
1841 1842 -c/--check and -C/--clean or no options, whether the working directory is
1842 1843 dirty, whether a revision is specified, and the relationship of the parent
1843 1844 rev to the target rev (linear or not). Match from top first. The -n
1844 1845 option doesn't exist on the command line, but represents the
1845 1846 experimental.updatecheck=noconflict option.
1846 1847
1847 1848 This logic is tested by test-update-branches.t.
1848 1849
1849 1850 -c -C -n -m dirty rev linear | result
1850 1851 y y * * * * * | (1)
1851 1852 y * y * * * * | (1)
1852 1853 y * * y * * * | (1)
1853 1854 * y y * * * * | (1)
1854 1855 * y * y * * * | (1)
1855 1856 * * y y * * * | (1)
1856 1857 * * * * * n n | x
1857 1858 * * * * n * * | ok
1858 1859 n n n n y * y | merge
1859 1860 n n n n y y n | (2)
1860 1861 n n n y y * * | merge
1861 1862 n n y n y * * | merge if no conflict
1862 1863 n y n n y * * | discard
1863 1864 y n n n y * * | (3)
1864 1865
1865 1866 x = can't happen
1866 1867 * = don't-care
1867 1868 1 = incompatible options (checked in commands.py)
1868 1869 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1869 1870 3 = abort: uncommitted changes (checked in commands.py)
1870 1871
1871 1872 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1872 1873 to repo[None] if None is passed.
1873 1874
1874 1875 Return the same tuple as applyupdates().
1875 1876 """
1876 1877 # Avoid cycle.
1877 1878 from . import sparse
1878 1879
1879 1880 # This function used to find the default destination if node was None, but
1880 1881 # that's now in destutil.py.
1881 1882 assert node is not None
1882 1883 if not branchmerge and not force:
1883 1884 # TODO: remove the default once all callers that pass branchmerge=False
1884 1885 # and force=False pass a value for updatecheck. We may want to allow
1885 1886 # updatecheck='abort' to better support some of these callers.
1886 1887 if updatecheck is None:
1887 1888 updatecheck = UPDATECHECK_LINEAR
1888 1889 okay = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
1889 1890 if updatecheck not in okay:
1890 1891 msg = r'Invalid updatecheck %r (can accept %r)'
1891 1892 msg %= (updatecheck, okay)
1892 1893 raise ValueError(msg)
1893 1894 if wc is not None and wc.isinmemory():
1894 1895 maybe_wlock = util.nullcontextmanager()
1895 1896 else:
1896 1897 maybe_wlock = repo.wlock()
1897 1898 with maybe_wlock:
1898 1899 if wc is None:
1899 1900 wc = repo[None]
1900 1901 pl = wc.parents()
1901 1902 p1 = pl[0]
1902 1903 p2 = repo[node]
1903 1904 if ancestor is not None:
1904 1905 pas = [repo[ancestor]]
1905 1906 else:
1906 1907 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1907 1908 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1908 1909 pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
1909 1910 else:
1910 1911 pas = [p1.ancestor(p2, warn=branchmerge)]
1911 1912
1912 1913 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1913 1914
1914 1915 overwrite = force and not branchmerge
1915 1916 ### check phase
1916 1917 if not overwrite:
1917 1918 if len(pl) > 1:
1918 1919 raise error.StateError(_(b"outstanding uncommitted merge"))
1919 1920 ms = wc.mergestate()
1920 1921 if ms.unresolvedcount():
1921 1922 msg = _(b"outstanding merge conflicts")
1922 1923 hint = _(b"use 'hg resolve' to resolve")
1923 1924 raise error.StateError(msg, hint=hint)
1924 1925 if branchmerge:
1925 1926 m_a = _(b"merging with a working directory ancestor has no effect")
1926 1927 if pas == [p2]:
1927 1928 raise error.Abort(m_a)
1928 1929 elif pas == [p1]:
1929 1930 if not mergeancestor and wc.branch() == p2.branch():
1930 1931 msg = _(b"nothing to merge")
1931 1932 hint = _(b"use 'hg update' or check 'hg heads'")
1932 1933 raise error.Abort(msg, hint=hint)
1933 1934 if not force and (wc.files() or wc.deleted()):
1934 1935 msg = _(b"uncommitted changes")
1935 1936 hint = _(b"use 'hg status' to list changes")
1936 1937 raise error.StateError(msg, hint=hint)
1937 1938 if not wc.isinmemory():
1938 1939 for s in sorted(wc.substate):
1939 1940 wc.sub(s).bailifchanged()
1940 1941
1941 1942 elif not overwrite:
1942 1943 if p1 == p2: # no-op update
1943 1944 # call the hooks and exit early
1944 1945 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1945 1946 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1946 1947 return updateresult(0, 0, 0, 0)
1947 1948
1948 1949 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1949 1950 [p1],
1950 1951 [p2],
1951 1952 ): # nonlinear
1952 1953 dirty = wc.dirty(missing=True)
1953 1954 if dirty:
1954 1955 # Branching is a bit strange to ensure we do the minimal
1955 1956 # number of calls to obsutil.foreground.
1956 1957 foreground = obsutil.foreground(repo, [p1.node()])
1957 1958 # note: the <node> variable contains a random identifier
1958 1959 if repo[node].node() in foreground:
1959 1960 pass # allow updating to successors
1960 1961 else:
1961 1962 msg = _(b"uncommitted changes")
1962 1963 hint = _(b"commit or update --clean to discard changes")
1963 1964 raise error.UpdateAbort(msg, hint=hint)
1964 1965 else:
1965 1966 # Allow jumping branches if clean and specific rev given
1966 1967 pass
1967 1968
1968 1969 if overwrite:
1969 1970 pas = [wc]
1970 1971 elif not branchmerge:
1971 1972 pas = [p1]
1972 1973
1973 1974 # deprecated config: merge.followcopies
1974 1975 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1975 1976 if overwrite:
1976 1977 followcopies = False
1977 1978 elif not pas[0]:
1978 1979 followcopies = False
1979 1980 if not branchmerge and not wc.dirty(missing=True):
1980 1981 followcopies = False
1981 1982
1982 1983 ### calculate phase
1983 1984 mresult = calculateupdates(
1984 1985 repo,
1985 1986 wc,
1986 1987 p2,
1987 1988 pas,
1988 1989 branchmerge,
1989 1990 force,
1990 1991 mergeancestor,
1991 1992 followcopies,
1992 1993 matcher=matcher,
1993 1994 mergeforce=mergeforce,
1994 1995 )
1995 1996
1996 1997 if updatecheck == UPDATECHECK_NO_CONFLICT:
1997 1998 if mresult.hasconflicts():
1998 1999 msg = _(b"conflicting changes")
1999 2000 hint = _(b"commit or update --clean to discard changes")
2000 2001 raise error.StateError(msg, hint=hint)
2001 2002
2002 2003 # Prompt and create actions. Most of this is in the resolve phase
2003 2004 # already, but we can't handle .hgsubstate in filemerge or
2004 2005 # subrepoutil.submerge yet so we have to keep prompting for it.
2005 2006 vals = mresult.getfile(b'.hgsubstate')
2006 2007 if vals:
2007 2008 f = b'.hgsubstate'
2008 2009 m, args, msg = vals
2009 2010 prompts = filemerge.partextras(labels)
2010 2011 prompts[b'f'] = f
2011 2012 if m == mergestatemod.ACTION_CHANGED_DELETED:
2012 2013 if repo.ui.promptchoice(
2013 2014 _(
2014 2015 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2015 2016 b"use (c)hanged version or (d)elete?"
2016 2017 b"$$ &Changed $$ &Delete"
2017 2018 )
2018 2019 % prompts,
2019 2020 0,
2020 2021 ):
2021 2022 mresult.addfile(
2022 2023 f,
2023 2024 mergestatemod.ACTION_REMOVE,
2024 2025 None,
2025 2026 b'prompt delete',
2026 2027 )
2027 2028 elif f in p1:
2028 2029 mresult.addfile(
2029 2030 f,
2030 2031 mergestatemod.ACTION_ADD_MODIFIED,
2031 2032 None,
2032 2033 b'prompt keep',
2033 2034 )
2034 2035 else:
2035 2036 mresult.addfile(
2036 2037 f,
2037 2038 mergestatemod.ACTION_ADD,
2038 2039 None,
2039 2040 b'prompt keep',
2040 2041 )
2041 2042 elif m == mergestatemod.ACTION_DELETED_CHANGED:
2042 2043 f1, f2, fa, move, anc = args
2043 2044 flags = p2[f2].flags()
2044 2045 if (
2045 2046 repo.ui.promptchoice(
2046 2047 _(
2047 2048 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2048 2049 b"use (c)hanged version or leave (d)eleted?"
2049 2050 b"$$ &Changed $$ &Deleted"
2050 2051 )
2051 2052 % prompts,
2052 2053 0,
2053 2054 )
2054 2055 == 0
2055 2056 ):
2056 2057 mresult.addfile(
2057 2058 f,
2058 2059 mergestatemod.ACTION_GET,
2059 2060 (flags, False),
2060 2061 b'prompt recreating',
2061 2062 )
2062 2063 else:
2063 2064 mresult.removefile(f)
2064 2065
2065 2066 if not util.fscasesensitive(repo.path):
2066 2067 # check collision between files only in p2 for clean update
2067 2068 if not branchmerge and (
2068 2069 force or not wc.dirty(missing=True, branch=False)
2069 2070 ):
2070 2071 _checkcollision(repo, p2.manifest(), None)
2071 2072 else:
2072 2073 _checkcollision(repo, wc.manifest(), mresult)
2073 2074
2074 2075 # divergent renames
2075 2076 for f, fl in sorted(mresult.diverge.items()):
2076 2077 repo.ui.warn(
2077 2078 _(
2078 2079 b"note: possible conflict - %s was renamed "
2079 2080 b"multiple times to:\n"
2080 2081 )
2081 2082 % f
2082 2083 )
2083 2084 for nf in sorted(fl):
2084 2085 repo.ui.warn(b" %s\n" % nf)
2085 2086
2086 2087 # rename and delete
2087 2088 for f, fl in sorted(mresult.renamedelete.items()):
2088 2089 repo.ui.warn(
2089 2090 _(
2090 2091 b"note: possible conflict - %s was deleted "
2091 2092 b"and renamed to:\n"
2092 2093 )
2093 2094 % f
2094 2095 )
2095 2096 for nf in sorted(fl):
2096 2097 repo.ui.warn(b" %s\n" % nf)
2097 2098
2098 2099 ### apply phase
2099 2100 if not branchmerge: # just jump to the new rev
2100 2101 fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
2101 2102 # If we're doing a partial update, we need to skip updating
2102 2103 # the dirstate.
2103 2104 always = matcher is None or matcher.always()
2104 2105 updatedirstate = updatedirstate and always and not wc.isinmemory()
2105 2106 if updatedirstate:
2106 2107 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2107 2108 # note that we're in the middle of an update
2108 2109 repo.vfs.write(b'updatestate', p2.hex())
2109 2110
2110 2111 _advertisefsmonitor(
2111 2112 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2112 2113 )
2113 2114
2114 2115 wantfiledata = updatedirstate and not branchmerge
2115 2116 stats, getfiledata, extraactions = applyupdates(
2116 2117 repo,
2117 2118 mresult,
2118 2119 wc,
2119 2120 p2,
2120 2121 overwrite,
2121 2122 wantfiledata,
2122 2123 labels=labels,
2123 2124 )
2124 2125
2125 2126 if updatedirstate:
2126 2127 if extraactions:
2127 2128 for k, acts in extraactions.items():
2128 2129 for a in acts:
2129 2130 mresult.addfile(a[0], k, *a[1:])
2130 2131 if k == mergestatemod.ACTION_GET and wantfiledata:
2131 2132 # no filedata until mergestate is updated to provide it
2132 2133 for a in acts:
2133 2134 getfiledata[a[0]] = None
2134 2135
2135 2136 assert len(getfiledata) == (
2136 2137 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
2137 2138 )
2138 2139 with repo.dirstate.parentchange():
2139 2140 ### Filter Filedata
2140 2141 #
2141 2142 # We gathered "cache" information for the clean files while
2142 2143 # updating them: mtime, size and mode.
2143 2144 #
2144 2145 # At the time this comment is written, there are various issues
2145 2146 # with how we gather the `mode` and `mtime` information (see
2146 2147 # the comment in `batchget`).
2147 2148 #
2148 2149 # We are going to smooth over one of these issues here: mtime ambiguity.
2149 2150 #
2150 2151 # i.e. even if the mtime gathered during `batchget` was
2151 2152 # correct[1], a change happening right after it could change the
2152 2153 # content while keeping the same mtime[2].
2153 2154 #
2154 2155 # When we reach the current code, the "on disk" part of the
2155 2156 # update operation is finished. We still assume that no other
2156 2157 # process raced that "on disk" part, but we want to at least
2157 2158 # prevent a later file change from altering the content of the file
2158 2159 # right after the update operation, so quickly that the same
2159 2160 # mtime would be recorded for the operation.
2160 2161 # To prevent such ambiguity from happening, we will only keep the
2161 2162 # "file data" for files whose mtime is strictly in the past,
2162 2163 # i.e. whose mtime is strictly lower than the current time.
2163 2164 #
2164 2165 # This protects us from race conditions with operations that could
2165 2166 # run right after this one, especially other Mercurial
2166 2167 # operations that could be waiting for the wlock to touch file
2167 2168 # content and the dirstate.
2168 2169 #
2169 2170 # In an ideal world, we could only get reliable information in
2170 2171 # `getfiledata` (from `batchget`); however, the current approach
2171 2172 # has been a successful compromise for many years.
2172 2173 #
2173 2174 # At the time this comment is written, not using any "cache"
2174 2175 # file data at all here would not be viable, as it would result in
2175 2176 # a very large amount of work (equivalent to the previous `hg
2176 2177 # update` during the next status after an update).
2177 2178 #
2178 2179 # [1] the current code cannot guarantee that the `mtime` and
2179 2180 # `mode` are correct, but the result is "okay in practice".
2180 2181 # (see the comment in `batchget`).
2181 2182 #
2182 2183 # [2] using nano-second precision can greatly help here because
2183 2184 # it makes the "different write with same mtime" issue
2184 2185 # virtually vanish. However, dirstate v1 cannot store such
2185 2186 # precision, and many Python runtimes, operating systems and
2186 2187 # filesystems do not provide us with such precision, so we
2187 2188 # have to operate as if it wasn't available.
2188 2189 if getfiledata:
2189 2190 ambiguous_mtime = {}
2190 2191 now = timestamp.get_fs_now(repo.vfs)
2191 2192 if now is None:
2192 2193 # we can't write to the FS, so we won't actually update
2193 2194 # the dirstate content anyway, no need to put cache
2194 2195 # information.
2195 2196 getfiledata = None
2196 2197 else:
2197 2198 now_sec = now[0]
2198 2199 for f, m in getfiledata.items():
2199 2200 if m is not None and m[2][0] >= now_sec:
2200 2201 ambiguous_mtime[f] = (m[0], m[1], None)
2201 2202 for f, m in ambiguous_mtime.items():
2202 2203 getfiledata[f] = m
2203 2204
2204 2205 repo.setparents(fp1, fp2)
2205 2206 mergestatemod.recordupdates(
2206 2207 repo, mresult.actionsdict, branchmerge, getfiledata
2207 2208 )
2208 2209 # update completed, clear state
2209 2210 util.unlink(repo.vfs.join(b'updatestate'))
2210 2211
2211 2212 if not branchmerge:
2212 2213 repo.dirstate.setbranch(p2.branch())
2213 2214
2214 2215 # If we're updating to a location, clean up any stale temporary includes
2215 2216 # (ex: this happens during hg rebase --abort).
2216 2217 if not branchmerge:
2217 2218 sparse.prunetemporaryincludes(repo)
2218 2219
2219 2220 if updatedirstate:
2220 2221 repo.hook(
2221 2222 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2222 2223 )
2223 2224 return stats
2224 2225
2225 2226
2226 2227 def merge(ctx, labels=None, force=False, wc=None):
2227 2228 """Merge another topological branch into the working copy.
2228 2229
2229 2230 force = whether the merge was run with 'merge --force' (deprecated)
2230 2231 """
2231 2232
2232 2233 return _update(
2233 2234 ctx.repo(),
2234 2235 ctx.rev(),
2235 2236 labels=labels,
2236 2237 branchmerge=True,
2237 2238 force=force,
2238 2239 mergeforce=force,
2239 2240 wc=wc,
2240 2241 )
2241 2242
2242 2243
2243 2244 def update(ctx, updatecheck=None, wc=None):
2244 2245 """Do a regular update to the given commit, aborting if there are conflicts.
2245 2246
2246 2247 The 'updatecheck' argument can be used to control what to do in case of
2247 2248 conflicts.
2248 2249
2249 2250 Note: This is a new, higher-level update() than the one that used to exist
2250 2251 in this module. That function is now called _update(). You can hopefully
2251 2252 replace your callers to use this new update(), or clean_update(), merge(),
2252 2253 revert_to(), or graft().
2253 2254 """
2254 2255 return _update(
2255 2256 ctx.repo(),
2256 2257 ctx.rev(),
2257 2258 branchmerge=False,
2258 2259 force=False,
2259 2260 labels=[b'working copy', b'destination', b'working copy parent'],
2260 2261 updatecheck=updatecheck,
2261 2262 wc=wc,
2262 2263 )
2263 2264
2264 2265
2265 2266 def clean_update(ctx, wc=None):
2266 2267 """Do a clean update to the given commit.
2267 2268
2268 2269 This involves updating to the commit and discarding any changes in the
2269 2270 working copy.
2270 2271 """
2271 2272 return _update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2272 2273
2273 2274
2274 2275 def revert_to(ctx, matcher=None, wc=None):
2275 2276 """Revert the working copy to the given commit.
2276 2277
2277 2278 The working copy will keep its current parent(s) but its content will
2278 2279 be the same as in the given commit.
2279 2280 """
2280 2281
2281 2282 return _update(
2282 2283 ctx.repo(),
2283 2284 ctx.rev(),
2284 2285 branchmerge=False,
2285 2286 force=True,
2286 2287 updatedirstate=False,
2287 2288 matcher=matcher,
2288 2289 wc=wc,
2289 2290 )
2290 2291
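A brief, illustrative sketch of how the higher-level helpers above (merge, update, clean_update, revert_to) might be driven from extension code; `repo` and `rev` are assumed to be a local repository and a resolvable revision, and are not part of this change:

    from mercurial import merge as mergemod

    ctx = repo[rev]
    stats = mergemod.update(ctx)          # regular update, aborts on conflicts
    # stats = mergemod.clean_update(ctx)  # same destination, discard local changes
    # stats = mergemod.merge(ctx)         # merge another branch into the working copy
    if stats.unresolvedcount:
        pass                              # unresolved files are left for 'hg resolve'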
2291 2292
2292 2293 def graft(
2293 2294 repo,
2294 2295 ctx,
2295 2296 base=None,
2296 2297 labels=None,
2297 2298 keepparent=False,
2298 2299 keepconflictparent=False,
2299 2300 wctx=None,
2300 2301 ):
2301 2302 """Do a graft-like merge.
2302 2303
2303 2304 This is a merge where the merge ancestor is chosen such that one
2304 2305 or more changesets are grafted onto the current changeset. In
2305 2306 addition to the merge, this fixes up the dirstate to include only
2306 2307 a single parent (if keepparent is False) and tries to duplicate any
2307 2308 renames/copies appropriately.
2308 2309
2309 2310 ctx - changeset to rebase
2310 2311 base - merge base, or ctx.p1() if not specified
2311 2312 labels - merge labels eg ['local', 'graft']
2312 2313 keepparent - keep second parent if any
2313 2314 keepconflictparent - if unresolved, keep parent used for the merge
2314 2315
2315 2316 """
2316 2317 # If we're grafting a descendant onto an ancestor, be sure to pass
2317 2318 # mergeancestor=True to update. This does two things: 1) allows the merge if
2318 2319 # the destination is the same as the parent of the ctx (so we can use graft
2319 2320 # to copy commits), and 2) informs update that the incoming changes are
2320 2321 # newer than the destination so it doesn't prompt about "remote changed foo
2321 2322 # which local deleted".
2322 2323 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2323 2324 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2324 2325 wctx = wctx or repo[None]
2325 2326 pctx = wctx.p1()
2326 2327 base = base or ctx.p1()
2327 2328 mergeancestor = (
2328 2329 repo.changelog.isancestor(pctx.node(), ctx.node())
2329 2330 or pctx.rev() == base.rev()
2330 2331 )
2331 2332
2332 2333 stats = _update(
2333 2334 repo,
2334 2335 ctx.node(),
2335 2336 True,
2336 2337 True,
2337 2338 base.node(),
2338 2339 mergeancestor=mergeancestor,
2339 2340 labels=labels,
2340 2341 wc=wctx,
2341 2342 )
2342 2343
2343 2344 if keepconflictparent and stats.unresolvedcount:
2344 2345 pother = ctx.node()
2345 2346 else:
2346 2347 pother = repo.nullid
2347 2348 parents = ctx.parents()
2348 2349 if keepparent and len(parents) == 2 and base in parents:
2349 2350 parents.remove(base)
2350 2351 pother = parents[0].node()
2351 2352 # Never set both parents equal to each other
2352 2353 if pother == pctx.node():
2353 2354 pother = repo.nullid
2354 2355
2355 2356 if wctx.isinmemory():
2356 2357 wctx.setparents(pctx.node(), pother)
2357 2358 # fix up dirstate for copies and renames
2358 2359 copies.graftcopies(wctx, ctx, base)
2359 2360 else:
2360 2361 with repo.dirstate.parentchange():
2361 2362 repo.setparents(pctx.node(), pother)
2362 2363 repo.dirstate.write(repo.currenttransaction())
2363 2364 # fix up dirstate for copies and renames
2364 2365 copies.graftcopies(wctx, ctx, base)
2365 2366 return stats
2366 2367
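A sketch of a graft-style call as a caller (e.g. the graft command) might issue it; the revision and labels are placeholders, not values mandated by this code:

    stats = graft(
        repo,
        repo[rev],                  # changeset to graft onto the working copy parent
        base=repo[rev].p1(),        # explicit merge base; this is also the default
        labels=[b'local', b'graft'],
    )
    if stats.unresolvedcount:
        pass                        # caller decides how to surface the conflicts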
2367 2368
2368 2369 def back_out(ctx, parent=None, wc=None):
2369 2370 if parent is None:
2370 2371 if ctx.p2() is not None:
2371 2372 msg = b"must specify parent of merge commit to back out"
2372 2373 raise error.ProgrammingError(msg)
2373 2374 parent = ctx.p1()
2374 2375 return _update(
2375 2376 ctx.repo(),
2376 2377 parent,
2377 2378 branchmerge=True,
2378 2379 force=True,
2379 2380 ancestor=ctx.node(),
2380 2381 mergeancestor=False,
2381 2382 )
2382 2383
2383 2384
2384 2385 def purge(
2385 2386 repo,
2386 2387 matcher,
2387 2388 unknown=True,
2388 2389 ignored=False,
2389 2390 removeemptydirs=True,
2390 2391 removefiles=True,
2391 2392 abortonerror=False,
2392 2393 noop=False,
2393 2394 confirm=False,
2394 2395 ):
2395 2396 """Purge the working directory of untracked files.
2396 2397
2397 2398 ``matcher`` is a matcher configured to scan the working directory -
2398 2399 potentially a subset.
2399 2400
2400 2401 ``unknown`` controls whether unknown files should be purged.
2401 2402
2402 2403 ``ignored`` controls whether ignored files should be purged.
2403 2404
2404 2405 ``removeemptydirs`` controls whether empty directories should be removed.
2405 2406
2406 2407 ``removefiles`` controls whether files are removed.
2407 2408
2408 2409 ``abortonerror`` causes an exception to be raised if an error occurs
2409 2410 deleting a file or directory.
2410 2411
2411 2412 ``noop`` controls whether to actually remove files. If set, nothing is
2412 2413 removed; the paths that would be removed are still returned.
2413 2414
2414 2415 ``confirm`` asks for confirmation before actually removing anything.
2415 2416
2416 2417 Returns an iterable of relative paths in the working directory that were
2417 2418 or would be removed.
2418 2419 """
2419 2420
2420 2421 def remove(removefn, path):
2421 2422 try:
2422 2423 removefn(path)
2423 2424 except OSError:
2424 2425 m = _(b'%s cannot be removed') % path
2425 2426 if abortonerror:
2426 2427 raise error.Abort(m)
2427 2428 else:
2428 2429 repo.ui.warn(_(b'warning: %s\n') % m)
2429 2430
2430 2431 # There's no API to copy a matcher. So mutate the passed matcher and
2431 2432 # restore it when we're done.
2432 2433 oldtraversedir = matcher.traversedir
2433 2434
2434 2435 res = []
2435 2436
2436 2437 try:
2437 2438 if removeemptydirs:
2438 2439 directories = []
2439 2440 matcher.traversedir = directories.append
2440 2441
2441 2442 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2442 2443
2443 2444 if confirm:
2444 2445 msg = None
2445 2446 nb_ignored = len(status.ignored)
2446 2447 nb_unknown = len(status.unknown)
2447 2448 if nb_unknown and nb_ignored:
2448 2449 msg = _(b"permanently delete %d unknown and %d ignored files?")
2449 2450 msg %= (nb_unknown, nb_ignored)
2450 2451 elif nb_unknown:
2451 2452 msg = _(b"permanently delete %d unknown files?")
2452 2453 msg %= nb_unknown
2453 2454 elif nb_ignored:
2454 2455 msg = _(b"permanently delete %d ignored files?")
2455 2456 msg %= nb_ignored
2456 2457 elif removeemptydirs:
2457 2458 dir_count = 0
2458 2459 for f in directories:
2459 2460 if matcher(f) and not repo.wvfs.listdir(f):
2460 2461 dir_count += 1
2461 2462 if dir_count:
2462 2463 msg = _(
2463 2464 b"permanently delete at least %d empty directories?"
2464 2465 )
2465 2466 msg %= dir_count
2466 2467 if msg is None:
2467 2468 return res
2468 2469 else:
2469 2470 msg += b" (yN)$$ &Yes $$ &No"
2470 2471 if repo.ui.promptchoice(msg, default=1) == 1:
2471 2472 raise error.CanceledError(_(b'removal cancelled'))
2472 2473
2473 2474 if removefiles:
2474 2475 for f in sorted(status.unknown + status.ignored):
2475 2476 if not noop:
2476 2477 repo.ui.note(_(b'removing file %s\n') % f)
2477 2478 remove(repo.wvfs.unlink, f)
2478 2479 res.append(f)
2479 2480
2480 2481 if removeemptydirs:
2481 2482 for f in sorted(directories, reverse=True):
2482 2483 if matcher(f) and not repo.wvfs.listdir(f):
2483 2484 if not noop:
2484 2485 repo.ui.note(_(b'removing directory %s\n') % f)
2485 2486 remove(repo.wvfs.rmdir, f)
2486 2487 res.append(f)
2487 2488
2488 2489 return res
2489 2490
2490 2491 finally:
2491 2492 matcher.traversedir = oldtraversedir
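An illustrative dry-run invocation of purge; building the matcher via scmutil.match is an assumption about typical caller code, not something this diff prescribes:

    m = scmutil.match(repo[None], [], {})   # match the whole working directory
    would_remove = purge(
        repo,
        m,
        unknown=True,
        ignored=False,
        noop=True,                          # report paths only, remove nothing
    )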
@@ -1,380 +1,381 b''
1 1 import contextlib
2 2 import errno
3 3 import os
4 4 import posixpath
5 5 import stat
6 6
7 7 from .i18n import _
8 8 from . import (
9 9 encoding,
10 10 error,
11 11 policy,
12 12 pycompat,
13 13 util,
14 14 )
15 15
16 16 if pycompat.TYPE_CHECKING:
17 17 from typing import (
18 18 Any,
19 19 Callable,
20 20 Iterator,
21 21 Optional,
22 22 )
23 23
24 24
25 25 rustdirs = policy.importrust('dirstate', 'Dirs')
26 26 parsers = policy.importmod('parsers')
27 27
28 28
29 29 def _lowerclean(s):
30 30 # type: (bytes) -> bytes
31 31 return encoding.hfsignoreclean(s.lower())
32 32
33 33
34 34 class pathauditor:
35 35 """ensure that a filesystem path contains no banned components.
36 36 the following properties of a path are checked:
37 37
38 38 - ends with a directory separator
39 39 - under top-level .hg
40 40 - starts at the root of a windows drive
41 41 - contains ".."
42 42
43 43 More checks are also done on the file system state:
44 44 - traverses a symlink (e.g. a/symlink_here/b)
45 45 - inside a nested repository (a callback can be used to approve
46 46 some nested repositories, e.g., subrepositories)
47 47
48 48 The file system checks are only done when 'realfs' is set to True (the
49 49 default). They should be disabled when we are auditing paths for operations on
50 50 stored history.
51 51
52 52 If 'cached' is set to True, audited paths and sub-directories are cached.
53 53 Be careful not to keep the cache of unmanaged directories for long, because
54 54 audited paths may be replaced with symlinks.
55 55 """
56 56
57 57 def __init__(self, root, callback=None, realfs=True, cached=False):
58 58 self.audited = set()
59 self.auditeddir = set()
59 self.auditeddir = dict()
60 60 self.root = root
61 61 self._realfs = realfs
62 62 self._cached = cached
63 63 self.callback = callback
64 64 if os.path.lexists(root) and not util.fscasesensitive(root):
65 65 self.normcase = util.normcase
66 66 else:
67 67 self.normcase = lambda x: x
68 68
69 69 def __call__(self, path, mode=None):
70 70 # type: (bytes, Optional[Any]) -> None
71 71 """Check the relative path.
72 72 path may contain a pattern (e.g. foodir/**.txt)"""
73 73
74 74 path = util.localpath(path)
75 75 if path in self.audited:
76 76 return
77 77 # AIX ignores "/" at end of path, others raise EISDIR.
78 78 if util.endswithsep(path):
79 79 raise error.InputError(
80 80 _(b"path ends in directory separator: %s") % path
81 81 )
82 82 parts = util.splitpath(path)
83 83 if (
84 84 os.path.splitdrive(path)[0]
85 85 or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
86 86 or pycompat.ospardir in parts
87 87 ):
88 88 raise error.InputError(
89 89 _(b"path contains illegal component: %s") % path
90 90 )
91 91 # Windows shortname aliases
92 92 if b"~" in path:
93 93 for p in parts:
94 94 if b"~" in p:
95 95 first, last = p.split(b"~", 1)
96 96 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
97 97 raise error.InputError(
98 98 _(b"path contains illegal component: %s") % path
99 99 )
100 100 if b'.hg' in _lowerclean(path):
101 101 lparts = [_lowerclean(p) for p in parts]
102 102 for p in b'.hg', b'.hg.':
103 103 if p in lparts[1:]:
104 104 pos = lparts.index(p)
105 105 base = os.path.join(*parts[:pos])
106 106 raise error.InputError(
107 107 _(b"path '%s' is inside nested repo %r")
108 108 % (path, pycompat.bytestr(base))
109 109 )
110 110
111 111 if self._realfs:
112 112 parts.pop()
113 113 # It's important that we check the path parts starting from the root.
114 114 # We don't want to add "foo/bar/baz" to auditeddir before checking if
115 115 # there's a "foo/.hg" directory. This also means we won't accidentally
116 116 # traverse a symlink into some other filesystem (which is potentially
117 117 # expensive to access).
118 118 for i in range(len(parts)):
119 119 prefix = pycompat.ossep.join(parts[: i + 1])
120 120 if prefix in self.auditeddir:
121 continue
122 res = self._checkfs_exists(prefix, path)
123 if self._cached:
124 self.auditeddir.add(prefix)
121 res = self.auditeddir[prefix]
122 else:
123 res = self._checkfs_exists(prefix, path)
124 if self._cached:
125 self.auditeddir[prefix] = res
125 126 if not res:
126 127 break
127 128
128 129 if self._cached:
129 130 self.audited.add(path)
130 131
131 132 def _checkfs_exists(self, prefix, path):
132 133 # type: (bytes, bytes) -> bool
133 134 """raise exception if a file system backed check fails.
134 135
135 136 Return a bool that indicates that the directory (or file) exists."""
136 137 curpath = os.path.join(self.root, prefix)
137 138 try:
138 139 st = os.lstat(curpath)
139 140 except OSError as err:
140 141 if err.errno == errno.ENOENT:
141 142 return False
142 143 # EINVAL can be raised as invalid path syntax under win32.
143 144 # They must be ignored because patterns can be checked too.
144 145 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
145 146 raise
146 147 else:
147 148 if stat.S_ISLNK(st.st_mode):
148 149 msg = _(b'path %r traverses symbolic link %r') % (
149 150 pycompat.bytestr(path),
150 151 pycompat.bytestr(prefix),
151 152 )
152 153 raise error.Abort(msg)
153 154 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
154 155 os.path.join(curpath, b'.hg')
155 156 ):
156 157 if not self.callback or not self.callback(curpath):
157 158 msg = _(b"path '%s' is inside nested repo %r")
158 159 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
159 160 return True
160 161
161 162 def check(self, path):
162 163 # type: (bytes) -> bool
163 164 try:
164 165 self(path)
165 166 return True
166 167 except (OSError, error.Abort):
167 168 return False
168 169
169 170 @contextlib.contextmanager
170 171 def cached(self):
171 172 if self._cached:
172 173 yield
173 174 else:
174 175 try:
175 176 self._cached = True
176 177 yield
177 178 finally:
178 179 self.audited.clear()
179 180 self.auditeddir.clear()
180 181 self._cached = False
181 182
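A usage sketch for the auditor together with the caching that this change extends to directory checks; the root and the iterable of paths are placeholders:

    audit = pathauditor(b'/path/to/repo')   # hypothetical repository root
    with audit.cached():                    # keep audit results for the whole batch
        for p in candidate_paths:           # hypothetical iterable of relative paths
            if not audit.check(p):          # False instead of raising on bad paths
                continue                    # illegal component, symlink, nested repo, ...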
182 183
183 184 def canonpath(root, cwd, myname, auditor=None):
184 185 # type: (bytes, bytes, bytes, Optional[pathauditor]) -> bytes
185 186 """return the canonical path of myname, given cwd and root
186 187
187 188 >>> def check(root, cwd, myname):
188 189 ... a = pathauditor(root, realfs=False)
189 190 ... try:
190 191 ... return canonpath(root, cwd, myname, a)
191 192 ... except error.Abort:
192 193 ... return 'aborted'
193 194 >>> def unixonly(root, cwd, myname, expected='aborted'):
194 195 ... if pycompat.iswindows:
195 196 ... return expected
196 197 ... return check(root, cwd, myname)
197 198 >>> def winonly(root, cwd, myname, expected='aborted'):
198 199 ... if not pycompat.iswindows:
199 200 ... return expected
200 201 ... return check(root, cwd, myname)
201 202 >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
202 203 'aborted'
203 204 >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
204 205 'aborted'
205 206 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
206 207 'aborted'
207 208 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
208 209 ... b'filename')
209 210 'filename'
210 211 >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
211 212 'filename'
212 213 >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
213 214 ... b'subdir/filename')
214 215 'subdir/filename'
215 216 >>> unixonly(b'/repo', b'/dir', b'filename')
216 217 'aborted'
217 218 >>> unixonly(b'/repo', b'/', b'filename')
218 219 'aborted'
219 220 >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
220 221 'filename'
221 222 >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
222 223 'filename'
223 224 >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
224 225 'subdir/filename'
225 226 """
226 227 if util.endswithsep(root):
227 228 rootsep = root
228 229 else:
229 230 rootsep = root + pycompat.ossep
230 231 name = myname
231 232 if not os.path.isabs(name):
232 233 name = os.path.join(root, cwd, name)
233 234 name = os.path.normpath(name)
234 235 if auditor is None:
235 236 auditor = pathauditor(root)
236 237 if name != rootsep and name.startswith(rootsep):
237 238 name = name[len(rootsep) :]
238 239 auditor(name)
239 240 return util.pconvert(name)
240 241 elif name == root:
241 242 return b''
242 243 else:
243 244 # Determine whether `name' is in the hierarchy at or beneath `root',
244 245 # by iterating name=dirname(name) until that causes no change (can't
245 246 # check name == '/', because that doesn't work on windows). The list
246 247 # `rel' holds the reversed list of components making up the relative
247 248 # file name we want.
248 249 rel = []
249 250 while True:
250 251 try:
251 252 s = util.samefile(name, root)
252 253 except OSError:
253 254 s = False
254 255 if s:
255 256 if not rel:
256 257 # name was actually the same as root (maybe a symlink)
257 258 return b''
258 259 rel.reverse()
259 260 name = os.path.join(*rel)
260 261 auditor(name)
261 262 return util.pconvert(name)
262 263 dirname, basename = util.split(name)
263 264 rel.append(basename)
264 265 if dirname == name:
265 266 break
266 267 name = dirname
267 268
268 269 # A common mistake is to use -R, but specify a file relative to the repo
269 270 # instead of cwd. Detect that case, and provide a hint to the user.
270 271 hint = None
271 272 try:
272 273 if cwd != root:
273 274 canonpath(root, root, myname, auditor)
274 275 relpath = util.pathto(root, cwd, b'')
275 276 if relpath.endswith(pycompat.ossep):
276 277 relpath = relpath[:-1]
277 278 hint = _(b"consider using '--cwd %s'") % relpath
278 279 except error.Abort:
279 280 pass
280 281
281 282 raise error.Abort(
282 283 _(b"%s not under root '%s'") % (myname, root), hint=hint
283 284 )
284 285
285 286
286 287 def normasprefix(path):
287 288 # type: (bytes) -> bytes
288 289 """normalize the specified path as path prefix
289 290
290 291 Returned value can be used safely for "p.startswith(prefix)",
291 292 "p[len(prefix):]", and so on.
292 293
293 294 For efficiency, this expects "path" argument to be already
294 295 normalized by "os.path.normpath", "os.path.realpath", and so on.
295 296
296 297 See also issue3033 for detail about need of this function.
297 298
298 299 >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
299 300 '/foo/bar/'
300 301 >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
301 302 '/'
302 303 """
303 304 d, p = os.path.splitdrive(path)
304 305 if len(p) != len(pycompat.ossep):
305 306 return path + pycompat.ossep
306 307 else:
307 308 return path
308 309
309 310
310 311 def finddirs(path):
311 312 # type: (bytes) -> Iterator[bytes]
312 313 pos = path.rfind(b'/')
313 314 while pos != -1:
314 315 yield path[:pos]
315 316 pos = path.rfind(b'/', 0, pos)
316 317 yield b''
317 318
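For illustration, finddirs yields every proper directory prefix of a path, deepest first, ending with the repository root b'':

    >>> list(finddirs(b'a/b/c.txt'))
    [b'a/b', b'a', b'']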
318 319
319 320 class dirs:
320 321 '''a multiset of directory names from a set of file paths'''
321 322
322 323 def __init__(self, map, only_tracked=False):
323 324 """
324 325 a dict map indicates a dirstate while a list indicates a manifest
325 326 """
326 327 self._dirs = {}
327 328 addpath = self.addpath
328 329 if isinstance(map, dict) and only_tracked:
329 330 for f, s in map.items():
330 331 if s.state != b'r':
331 332 addpath(f)
332 333 elif only_tracked:
333 334 msg = b"`only_tracked` is only supported with a dict source"
334 335 raise error.ProgrammingError(msg)
335 336 else:
336 337 for f in map:
337 338 addpath(f)
338 339
339 340 def addpath(self, path):
340 341 # type: (bytes) -> None
341 342 dirs = self._dirs
342 343 for base in finddirs(path):
343 344 if base.endswith(b'/'):
344 345 raise ValueError(
345 346 "found invalid consecutive slashes in path: %r" % base
346 347 )
347 348 if base in dirs:
348 349 dirs[base] += 1
349 350 return
350 351 dirs[base] = 1
351 352
352 353 def delpath(self, path):
353 354 # type: (bytes) -> None
354 355 dirs = self._dirs
355 356 for base in finddirs(path):
356 357 if dirs[base] > 1:
357 358 dirs[base] -= 1
358 359 return
359 360 del dirs[base]
360 361
361 362 def __iter__(self):
362 363 return iter(self._dirs)
363 364
364 365 def __contains__(self, d):
365 366 # type: (bytes) -> bool
366 367 return d in self._dirs
367 368
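A small sketch of the multiset semantics; the C and Rust variants selected below are intended to behave the same way:

    >>> d = dirs([b'a/b/f1', b'a/b/f2', b'c/f3'])
    >>> b'a/b' in d, b'c' in d, b'x' in d
    (True, True, False)
    >>> d.delpath(b'a/b/f2')
    >>> b'a/b' in d                 # still referenced by a/b/f1
    True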
368 369
369 370 if util.safehasattr(parsers, 'dirs'):
370 371 dirs = parsers.dirs
371 372
372 373 if rustdirs is not None:
373 374 dirs = rustdirs
374 375
375 376
376 377 # forward two methods from posixpath that do what we need, but we'd
377 378 # rather not let our internals know that we're thinking in posix terms
378 379 # - instead we'll let them be oblivious.
379 380 join = posixpath.join
380 381 dirname = posixpath.dirname # type: Callable[[bytes], bytes]