fsmonitor: increase the threshold before we recommend it, when using rust...
Valentin Gatien-Baron
r46034:c1d6e930 (branch: default)
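For context: alongside the existing `fsmonitor.warn_update_file_count` threshold (default 50000), this changeset documents and registers a larger `fsmonitor.warn_update_file_count_rust` threshold (default 400000) to be used when Mercurial is built with Rust support, presumably because the Rust status path narrows fsmonitor's advantage on smaller working copies. A minimal sketch of how calling code might choose between the two; the helper and its `rust_status_available` flag are illustrative assumptions, not part of this diff:

    def update_warning_threshold(ui, rust_status_available):
        # Pick the file-count threshold above which Mercurial suggests
        # enabling fsmonitor (see warn_when_unused in the docstring below).
        if rust_status_available:
            # new config item, default 400000 (registered in configitems.py)
            return ui.configint(b'fsmonitor', b'warn_update_file_count_rust')
        # pre-existing config item, default 50000
        return ui.configint(b'fsmonitor', b'warn_update_file_count')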
hgext/fsmonitor/__init__.py
@@ -1,984 +1,986 @@
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 fsmonitor is incompatible with the largefiles and eol extensions, and
22 22 will disable itself if any of those are active.
23 23
24 24 The following configuration options exist:
25 25
26 26 ::
27 27
28 28 [fsmonitor]
29 29 mode = {off, on, paranoid}
30 30
31 31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
32 32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
33 33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
34 34 and ensure that the results are consistent.
35 35
36 36 ::
37 37
38 38 [fsmonitor]
39 39 timeout = (float)
40 40
41 41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
42 42 to return results. Defaults to `2.0`.
43 43
44 44 ::
45 45
46 46 [fsmonitor]
47 47 blacklistusers = (list of userids)
48 48
49 49 A list of usernames for which fsmonitor will disable itself altogether.
50 50
51 51 ::
52 52
53 53 [fsmonitor]
54 54 walk_on_invalidate = (boolean)
55 55
56 56 Whether or not to walk the whole repo ourselves when our cached state has been
57 57 invalidated, for example when Watchman has been restarted or .hgignore rules
58 58 have been changed. Walking the repo in that case can result in competing for
59 59 I/O with Watchman. For large repos it is recommended to set this value to
60 60 false. You may wish to set this to true if you have a very fast filesystem
61 61 that can outpace the IPC overhead of getting the result data for the full repo
62 62 from Watchman. Defaults to false.
63 63
64 64 ::
65 65
66 66 [fsmonitor]
67 67 warn_when_unused = (boolean)
68 68
69 69 Whether to print a warning during certain operations when fsmonitor would be
70 70 beneficial to performance but isn't enabled.
71 71
72 72 ::
73 73
74 74 [fsmonitor]
75 75 warn_update_file_count = (integer)
76 # or when mercurial is built with rust support
77 warn_update_file_count_rust = (integer)
76 78
77 79 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
78 80 be printed during working directory updates if this many files will be
79 81 created.
80 82 '''
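The options documented above are ordinary `[fsmonitor]` settings. A small illustrative sketch of reading them with the same `ui.config*` helpers this extension uses elsewhere (a standard Mercurial `ui` object is assumed):

    def read_fsmonitor_settings(ui):
        # Illustrative only: gather the documented [fsmonitor] options.
        return {
            b'mode': ui.config(b'fsmonitor', b'mode'),  # off, on, or paranoid
            b'timeout': float(ui.config(b'fsmonitor', b'timeout')),  # seconds
            b'walk_on_invalidate': ui.configbool(b'fsmonitor', b'walk_on_invalidate'),
            b'blacklistusers': ui.configlist(b'fsmonitor', b'blacklistusers'),
            b'warn_when_unused': ui.configbool(b'fsmonitor', b'warn_when_unused'),
        }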
81 83
82 84 # Platforms Supported
83 85 # ===================
84 86 #
85 87 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
86 88 # even under severe loads.
87 89 #
88 90 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
89 91 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
90 92 # user testing under normal loads.
91 93 #
92 94 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
93 95 # very little testing has been done.
94 96 #
95 97 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
96 98 #
97 99 # Known Issues
98 100 # ============
99 101 #
100 102 # * fsmonitor will disable itself if any of the following extensions are
101 103 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
102 104 # * fsmonitor will produce incorrect results if nested repos that are not
103 105 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
104 106 #
105 107 # The issues related to nested repos and subrepos are probably not fundamental
106 108 # ones. Patches to fix them are welcome.
107 109
108 110 from __future__ import absolute_import
109 111
110 112 import codecs
111 113 import os
112 114 import stat
113 115 import sys
114 116 import tempfile
115 117 import weakref
116 118
117 119 from mercurial.i18n import _
118 120 from mercurial.node import hex
119 121 from mercurial.pycompat import open
120 122 from mercurial import (
121 123 context,
122 124 encoding,
123 125 error,
124 126 extensions,
125 127 localrepo,
126 128 merge,
127 129 pathutil,
128 130 pycompat,
129 131 registrar,
130 132 scmutil,
131 133 util,
132 134 )
133 135 from mercurial import match as matchmod
134 136 from mercurial.utils import (
135 137 hashutil,
136 138 stringutil,
137 139 )
138 140
139 141 from . import (
140 142 pywatchman,
141 143 state,
142 144 watchmanclient,
143 145 )
144 146
145 147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
146 148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
147 149 # be specifying the version(s) of Mercurial they are tested with, or
148 150 # leave the attribute unspecified.
149 151 testedwith = b'ships-with-hg-core'
150 152
151 153 configtable = {}
152 154 configitem = registrar.configitem(configtable)
153 155
154 156 configitem(
155 157 b'fsmonitor', b'mode', default=b'on',
156 158 )
157 159 configitem(
158 160 b'fsmonitor', b'walk_on_invalidate', default=False,
159 161 )
160 162 configitem(
161 163 b'fsmonitor', b'timeout', default=b'2',
162 164 )
163 165 configitem(
164 166 b'fsmonitor', b'blacklistusers', default=list,
165 167 )
166 168 configitem(
167 169 b'fsmonitor', b'watchman_exe', default=b'watchman',
168 170 )
169 171 configitem(
170 172 b'fsmonitor', b'verbose', default=True, experimental=True,
171 173 )
172 174 configitem(
173 175 b'experimental', b'fsmonitor.transaction_notify', default=False,
174 176 )
175 177
176 178 # This extension is incompatible with the following blacklisted extensions
177 179 # and will disable itself when encountering one of these:
178 180 _blacklist = [b'largefiles', b'eol']
179 181
180 182
181 183 def debuginstall(ui, fm):
182 184 fm.write(
183 185 b"fsmonitor-watchman",
184 186 _(b"fsmonitor checking for watchman binary... (%s)\n"),
185 187 ui.configpath(b"fsmonitor", b"watchman_exe"),
186 188 )
187 189 root = tempfile.mkdtemp()
188 190 c = watchmanclient.client(ui, root)
189 191 err = None
190 192 try:
191 193 v = c.command(b"version")
192 194 fm.write(
193 195 b"fsmonitor-watchman-version",
194 196 _(b" watchman binary version %s\n"),
195 197 pycompat.bytestr(v["version"]),
196 198 )
197 199 except watchmanclient.Unavailable as e:
198 200 err = stringutil.forcebytestr(e)
199 201 fm.condwrite(
200 202 err,
201 203 b"fsmonitor-watchman-error",
202 204 _(b" watchman binary missing or broken: %s\n"),
203 205 err,
204 206 )
205 207 return 1 if err else 0
206 208
207 209
208 210 def _handleunavailable(ui, state, ex):
209 211 """Exception handler for Watchman interaction exceptions"""
210 212 if isinstance(ex, watchmanclient.Unavailable):
211 213 # experimental config: fsmonitor.verbose
212 214 if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
213 215 if b'illegal_fstypes' not in stringutil.forcebytestr(ex):
214 216 ui.warn(stringutil.forcebytestr(ex) + b'\n')
215 217 if ex.invalidate:
216 218 state.invalidate()
217 219 # experimental config: fsmonitor.verbose
218 220 if ui.configbool(b'fsmonitor', b'verbose'):
219 221 ui.log(
220 222 b'fsmonitor',
221 223 b'Watchman unavailable: %s\n',
222 224 stringutil.forcebytestr(ex.msg),
223 225 )
224 226 else:
225 227 ui.log(
226 228 b'fsmonitor',
227 229 b'Watchman exception: %s\n',
228 230 stringutil.forcebytestr(ex),
229 231 )
230 232
231 233
232 234 def _hashignore(ignore):
233 235 """Calculate hash for ignore patterns and filenames
234 236
235 237 If this information changes between Mercurial invocations, we can't
236 238 rely on Watchman information anymore and have to re-scan the working
237 239 copy.
238 240
239 241 """
240 242 sha1 = hashutil.sha1()
241 243 sha1.update(pycompat.byterepr(ignore))
242 244 return pycompat.sysbytes(sha1.hexdigest())
243 245
244 246
245 247 _watchmanencoding = pywatchman.encoding.get_local_encoding()
246 248 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
247 249 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
248 250
249 251
250 252 def _watchmantofsencoding(path):
251 253 """Fix path to match watchman and local filesystem encoding
252 254
253 255 watchman's paths encoding can differ from filesystem encoding. For example,
254 256 on Windows, it's always utf-8.
255 257 """
256 258 try:
257 259 decoded = path.decode(_watchmanencoding)
258 260 except UnicodeDecodeError as e:
259 261 raise error.Abort(
260 262 stringutil.forcebytestr(e), hint=b'watchman encoding error'
261 263 )
262 264
263 265 try:
264 266 encoded = decoded.encode(_fsencoding, 'strict')
265 267 except UnicodeEncodeError as e:
266 268 raise error.Abort(stringutil.forcebytestr(e))
267 269
268 270 return encoded
269 271
270 272
271 273 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
272 274 '''Replacement for dirstate.walk, hooking into Watchman.
273 275
274 276 Whenever full is False, ignored is False, and the Watchman client is
275 277 available, use Watchman combined with saved state to possibly return only a
276 278 subset of files.'''
277 279
278 280 def bail(reason):
279 281 self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
280 282 return orig(match, subrepos, unknown, ignored, full=True)
281 283
282 284 if full:
283 285 return bail(b'full rewalk requested')
284 286 if ignored:
285 287 return bail(b'listing ignored files')
286 288 if not self._watchmanclient.available():
287 289 return bail(b'client unavailable')
288 290 state = self._fsmonitorstate
289 291 clock, ignorehash, notefiles = state.get()
290 292 if not clock:
291 293 if state.walk_on_invalidate:
292 294 return bail(b'no clock')
293 295 # Initial NULL clock value, see
294 296 # https://facebook.github.io/watchman/docs/clockspec.html
295 297 clock = b'c:0:0'
296 298 notefiles = []
297 299
298 300 ignore = self._ignore
299 301 dirignore = self._dirignore
300 302 if unknown:
301 303 if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
302 304 # ignore list changed -- can't rely on Watchman state any more
303 305 if state.walk_on_invalidate:
304 306 return bail(b'ignore rules changed')
305 307 notefiles = []
306 308 clock = b'c:0:0'
307 309 else:
308 310 # always ignore
309 311 ignore = util.always
310 312 dirignore = util.always
311 313
312 314 matchfn = match.matchfn
313 315 matchalways = match.always()
314 316 dmap = self._map
315 317 if util.safehasattr(dmap, b'_map'):
316 318 # for better performance, directly access the inner dirstate map if the
317 319 # standard dirstate implementation is in use.
318 320 dmap = dmap._map
319 321 nonnormalset = self._map.nonnormalset
320 322
321 323 copymap = self._map.copymap
322 324 getkind = stat.S_IFMT
323 325 dirkind = stat.S_IFDIR
324 326 regkind = stat.S_IFREG
325 327 lnkkind = stat.S_IFLNK
326 328 join = self._join
327 329 normcase = util.normcase
328 330 fresh_instance = False
329 331
330 332 exact = skipstep3 = False
331 333 if match.isexact(): # match.exact
332 334 exact = True
333 335 dirignore = util.always # skip step 2
334 336 elif match.prefix(): # match.match, no patterns
335 337 skipstep3 = True
336 338
337 339 if not exact and self._checkcase:
338 340 # note that even though we could receive directory entries, we're only
339 341 # interested in checking if a file with the same name exists. So only
340 342 # normalize files if possible.
341 343 normalize = self._normalizefile
342 344 skipstep3 = False
343 345 else:
344 346 normalize = None
345 347
346 348 # step 1: find all explicit files
347 349 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
348 350
349 351 skipstep3 = skipstep3 and not (work or dirsnotfound)
350 352 work = [d for d in work if not dirignore(d[0])]
351 353
352 354 if not work and (exact or skipstep3):
353 355 for s in subrepos:
354 356 del results[s]
355 357 del results[b'.hg']
356 358 return results
357 359
358 360 # step 2: query Watchman
359 361 try:
360 362 # Use the user-configured timeout for the query.
361 363 # Add a little slack over the top of the user query to allow for
362 364 # overheads while transferring the data
363 365 self._watchmanclient.settimeout(state.timeout + 0.1)
364 366 result = self._watchmanclient.command(
365 367 b'query',
366 368 {
367 369 b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
368 370 b'since': clock,
369 371 b'expression': [
370 372 b'not',
371 373 [
372 374 b'anyof',
373 375 [b'dirname', b'.hg'],
374 376 [b'name', b'.hg', b'wholename'],
375 377 ],
376 378 ],
377 379 b'sync_timeout': int(state.timeout * 1000),
378 380 b'empty_on_fresh_instance': state.walk_on_invalidate,
379 381 },
380 382 )
381 383 except Exception as ex:
382 384 _handleunavailable(self._ui, state, ex)
383 385 self._watchmanclient.clearconnection()
384 386 return bail(b'exception during run')
385 387 else:
386 388 # We need to propagate the last observed clock up so that we
387 389 # can use it for our next query
388 390 state.setlastclock(pycompat.sysbytes(result[b'clock']))
389 391 if result[b'is_fresh_instance']:
390 392 if state.walk_on_invalidate:
391 393 state.invalidate()
392 394 return bail(b'fresh instance')
393 395 fresh_instance = True
394 396 # Ignore any prior notable files from the state info
395 397 notefiles = []
396 398
397 399 # for file paths which require normalization and we encounter a case
398 400 # collision, we store our own foldmap
399 401 if normalize:
400 402 foldmap = {normcase(k): k for k in results}
401 403
402 404 switch_slashes = pycompat.ossep == b'\\'
403 405 # The order of the results is, strictly speaking, undefined.
404 406 # For case changes on a case insensitive filesystem we may receive
405 407 # two entries, one with exists=True and another with exists=False.
406 408 # The exists=True entries in the same response should be interpreted
407 409 # as being happens-after the exists=False entries due to the way that
408 410 # Watchman tracks files. We use this property to reconcile deletes
409 411 # for name case changes.
410 412 for entry in result[b'files']:
411 413 fname = entry[b'name']
412 414
413 415 # Watchman always give us a str. Normalize to bytes on Python 3
414 416 # using Watchman's encoding, if needed.
415 417 if not isinstance(fname, bytes):
416 418 fname = fname.encode(_watchmanencoding)
417 419
418 420 if _fixencoding:
419 421 fname = _watchmantofsencoding(fname)
420 422
421 423 if switch_slashes:
422 424 fname = fname.replace(b'\\', b'/')
423 425 if normalize:
424 426 normed = normcase(fname)
425 427 fname = normalize(fname, True, True)
426 428 foldmap[normed] = fname
427 429 fmode = entry[b'mode']
428 430 fexists = entry[b'exists']
429 431 kind = getkind(fmode)
430 432
431 433 if b'/.hg/' in fname or fname.endswith(b'/.hg'):
432 434 return bail(b'nested-repo-detected')
433 435
434 436 if not fexists:
435 437 # if marked as deleted and we don't already have a change
436 438 # record, mark it as deleted. If we already have an entry
437 439 # for fname then it was either part of walkexplicit or was
438 440 # an earlier result that was a case change
439 441 if (
440 442 fname not in results
441 443 and fname in dmap
442 444 and (matchalways or matchfn(fname))
443 445 ):
444 446 results[fname] = None
445 447 elif kind == dirkind:
446 448 if fname in dmap and (matchalways or matchfn(fname)):
447 449 results[fname] = None
448 450 elif kind == regkind or kind == lnkkind:
449 451 if fname in dmap:
450 452 if matchalways or matchfn(fname):
451 453 results[fname] = entry
452 454 elif (matchalways or matchfn(fname)) and not ignore(fname):
453 455 results[fname] = entry
454 456 elif fname in dmap and (matchalways or matchfn(fname)):
455 457 results[fname] = None
456 458
457 459 # step 3: query notable files we don't already know about
458 460 # XXX try not to iterate over the entire dmap
459 461 if normalize:
460 462 # any notable files that have changed case will already be handled
461 463 # above, so just check membership in the foldmap
462 464 notefiles = {
463 465 normalize(f, True, True)
464 466 for f in notefiles
465 467 if normcase(f) not in foldmap
466 468 }
467 469 visit = {
468 470 f
469 471 for f in notefiles
470 472 if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
471 473 }
472 474
473 475 if not fresh_instance:
474 476 if matchalways:
475 477 visit.update(f for f in nonnormalset if f not in results)
476 478 visit.update(f for f in copymap if f not in results)
477 479 else:
478 480 visit.update(
479 481 f for f in nonnormalset if f not in results and matchfn(f)
480 482 )
481 483 visit.update(f for f in copymap if f not in results and matchfn(f))
482 484 else:
483 485 if matchalways:
484 486 visit.update(
485 487 f for f, st in pycompat.iteritems(dmap) if f not in results
486 488 )
487 489 visit.update(f for f in copymap if f not in results)
488 490 else:
489 491 visit.update(
490 492 f
491 493 for f, st in pycompat.iteritems(dmap)
492 494 if f not in results and matchfn(f)
493 495 )
494 496 visit.update(f for f in copymap if f not in results and matchfn(f))
495 497
496 498 audit = pathutil.pathauditor(self._root, cached=True).check
497 499 auditpass = [f for f in visit if audit(f)]
498 500 auditpass.sort()
499 501 auditfail = visit.difference(auditpass)
500 502 for f in auditfail:
501 503 results[f] = None
502 504
503 505 nf = iter(auditpass)
504 506 for st in util.statfiles([join(f) for f in auditpass]):
505 507 f = next(nf)
506 508 if st or f in dmap:
507 509 results[f] = st
508 510
509 511 for s in subrepos:
510 512 del results[s]
511 513 del results[b'.hg']
512 514 return results
513 515
514 516
515 517 def overridestatus(
516 518 orig,
517 519 self,
518 520 node1=b'.',
519 521 node2=None,
520 522 match=None,
521 523 ignored=False,
522 524 clean=False,
523 525 unknown=False,
524 526 listsubrepos=False,
525 527 ):
526 528 listignored = ignored
527 529 listclean = clean
528 530 listunknown = unknown
529 531
530 532 def _cmpsets(l1, l2):
531 533 try:
532 534 if b'FSMONITOR_LOG_FILE' in encoding.environ:
533 535 fn = encoding.environ[b'FSMONITOR_LOG_FILE']
534 536 f = open(fn, b'wb')
535 537 else:
536 538 fn = b'fsmonitorfail.log'
537 539 f = self.vfs.open(fn, b'wb')
538 540 except (IOError, OSError):
539 541 self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
540 542 return
541 543
542 544 try:
543 545 for i, (s1, s2) in enumerate(zip(l1, l2)):
544 546 if set(s1) != set(s2):
545 547 f.write(b'sets at position %d are unequal\n' % i)
546 548 f.write(b'watchman returned: %s\n' % s1)
547 549 f.write(b'stat returned: %s\n' % s2)
548 550 finally:
549 551 f.close()
550 552
551 553 if isinstance(node1, context.changectx):
552 554 ctx1 = node1
553 555 else:
554 556 ctx1 = self[node1]
555 557 if isinstance(node2, context.changectx):
556 558 ctx2 = node2
557 559 else:
558 560 ctx2 = self[node2]
559 561
560 562 working = ctx2.rev() is None
561 563 parentworking = working and ctx1 == self[b'.']
562 564 match = match or matchmod.always()
563 565
564 566 # Maybe we can use this opportunity to update Watchman's state.
565 567 # Mercurial uses workingcommitctx and/or memctx to represent the part of
566 568 # the workingctx that is to be committed. So don't update the state in
567 569 # that case.
568 570 # HG_PENDING is set in the environment when the dirstate is being updated
569 571 # in the middle of a transaction; we must not update our state in that
570 572 # case, or we risk forgetting about changes in the working copy.
571 573 updatestate = (
572 574 parentworking
573 575 and match.always()
574 576 and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
575 577 and b'HG_PENDING' not in encoding.environ
576 578 )
577 579
578 580 try:
579 581 if self._fsmonitorstate.walk_on_invalidate:
580 582 # Use a short timeout to query the current clock. If that
581 583 # takes too long then we assume that the service will be slow
582 584 # to answer our query.
583 585 # walk_on_invalidate indicates that we prefer to walk the
584 586 # tree ourselves because we can ignore portions that Watchman
585 587 # cannot and we tend to be faster in the warmer buffer cache
586 588 # cases.
587 589 self._watchmanclient.settimeout(0.1)
588 590 else:
589 591 # Give Watchman more time to potentially complete its walk
590 592 # and return the initial clock. In this mode we assume that
591 593 # the filesystem will be slower than parsing a potentially
592 594 # very large Watchman result set.
593 595 self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
594 596 startclock = self._watchmanclient.getcurrentclock()
595 597 except Exception as ex:
596 598 self._watchmanclient.clearconnection()
597 599 _handleunavailable(self.ui, self._fsmonitorstate, ex)
598 600 # boo, Watchman failed. bail
599 601 return orig(
600 602 node1,
601 603 node2,
602 604 match,
603 605 listignored,
604 606 listclean,
605 607 listunknown,
606 608 listsubrepos,
607 609 )
608 610
609 611 if updatestate:
610 612 # We need info about unknown files. This may make things slower the
611 613 # first time, but whatever.
612 614 stateunknown = True
613 615 else:
614 616 stateunknown = listunknown
615 617
616 618 if updatestate:
617 619 ps = poststatus(startclock)
618 620 self.addpostdsstatus(ps)
619 621
620 622 r = orig(
621 623 node1, node2, match, listignored, listclean, stateunknown, listsubrepos
622 624 )
623 625 modified, added, removed, deleted, unknown, ignored, clean = r
624 626
625 627 if not listunknown:
626 628 unknown = []
627 629
628 630 # don't do paranoid checks if we're not going to query Watchman anyway
629 631 full = listclean or match.traversedir is not None
630 632 if self._fsmonitorstate.mode == b'paranoid' and not full:
631 633 # run status again and fall back to the old walk this time
632 634 self.dirstate._fsmonitordisable = True
633 635
634 636 # shut the UI up
635 637 quiet = self.ui.quiet
636 638 self.ui.quiet = True
637 639 fout, ferr = self.ui.fout, self.ui.ferr
638 640 self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
639 641
640 642 try:
641 643 rv2 = orig(
642 644 node1,
643 645 node2,
644 646 match,
645 647 listignored,
646 648 listclean,
647 649 listunknown,
648 650 listsubrepos,
649 651 )
650 652 finally:
651 653 self.dirstate._fsmonitordisable = False
652 654 self.ui.quiet = quiet
653 655 self.ui.fout, self.ui.ferr = fout, ferr
654 656
655 657 # clean isn't tested since it's set to True above
656 658 with self.wlock():
657 659 _cmpsets(
658 660 [modified, added, removed, deleted, unknown, ignored, clean],
659 661 rv2,
660 662 )
661 663 modified, added, removed, deleted, unknown, ignored, clean = rv2
662 664
663 665 return scmutil.status(
664 666 modified, added, removed, deleted, unknown, ignored, clean
665 667 )
666 668
667 669
668 670 class poststatus(object):
669 671 def __init__(self, startclock):
670 672 self._startclock = pycompat.sysbytes(startclock)
671 673
672 674 def __call__(self, wctx, status):
673 675 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
674 676 hashignore = _hashignore(wctx.repo().dirstate._ignore)
675 677 notefiles = (
676 678 status.modified
677 679 + status.added
678 680 + status.removed
679 681 + status.deleted
680 682 + status.unknown
681 683 )
682 684 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
683 685
684 686
685 687 def makedirstate(repo, dirstate):
686 688 class fsmonitordirstate(dirstate.__class__):
687 689 def _fsmonitorinit(self, repo):
688 690 # _fsmonitordisable is used in paranoid mode
689 691 self._fsmonitordisable = False
690 692 self._fsmonitorstate = repo._fsmonitorstate
691 693 self._watchmanclient = repo._watchmanclient
692 694 self._repo = weakref.proxy(repo)
693 695
694 696 def walk(self, *args, **kwargs):
695 697 orig = super(fsmonitordirstate, self).walk
696 698 if self._fsmonitordisable:
697 699 return orig(*args, **kwargs)
698 700 return overridewalk(orig, self, *args, **kwargs)
699 701
700 702 def rebuild(self, *args, **kwargs):
701 703 self._fsmonitorstate.invalidate()
702 704 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
703 705
704 706 def invalidate(self, *args, **kwargs):
705 707 self._fsmonitorstate.invalidate()
706 708 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
707 709
708 710 dirstate.__class__ = fsmonitordirstate
709 711 dirstate._fsmonitorinit(repo)
710 712
711 713
712 714 def wrapdirstate(orig, self):
713 715 ds = orig(self)
714 716 # only override the dirstate when Watchman is available for the repo
715 717 if util.safehasattr(self, b'_fsmonitorstate'):
716 718 makedirstate(self, ds)
717 719 return ds
718 720
719 721
720 722 def extsetup(ui):
721 723 extensions.wrapfilecache(
722 724 localrepo.localrepository, b'dirstate', wrapdirstate
723 725 )
724 726 if pycompat.isdarwin:
725 727 # An assist for avoiding the dangling-symlink fsevents bug
726 728 extensions.wrapfunction(os, b'symlink', wrapsymlink)
727 729
728 730 extensions.wrapfunction(merge, b'update', wrapupdate)
729 731
730 732
731 733 def wrapsymlink(orig, source, link_name):
732 734 ''' if we create a dangling symlink, also touch the parent dir
733 735 to encourage fsevents notifications to work more correctly '''
734 736 try:
735 737 return orig(source, link_name)
736 738 finally:
737 739 try:
738 740 os.utime(os.path.dirname(link_name), None)
739 741 except OSError:
740 742 pass
741 743
742 744
743 745 class state_update(object):
744 746 ''' This context manager is responsible for dispatching the state-enter
745 747 and state-leave signals to the watchman service. The enter and leave
746 748 methods can be invoked manually (for scenarios where context manager
747 749 semantics are not possible). If parameters oldnode and newnode are None,
748 750 they will be populated based on current working copy in enter and
749 751 leave, respectively. Similarly, if the distance is none, it will be
750 752 calculated based on the oldnode and newnode in the leave method.'''
751 753
752 754 def __init__(
753 755 self,
754 756 repo,
755 757 name,
756 758 oldnode=None,
757 759 newnode=None,
758 760 distance=None,
759 761 partial=False,
760 762 ):
761 763 self.repo = repo.unfiltered()
762 764 self.name = name
763 765 self.oldnode = oldnode
764 766 self.newnode = newnode
765 767 self.distance = distance
766 768 self.partial = partial
767 769 self._lock = None
768 770 self.need_leave = False
769 771
770 772 def __enter__(self):
771 773 self.enter()
772 774
773 775 def enter(self):
774 776 # Make sure we have a wlock prior to sending notifications to watchman.
775 777 # We don't want to race with other actors. In the update case,
776 778 # merge.update is going to take the wlock almost immediately. We are
777 779 # effectively extending the lock around several short sanity checks.
778 780 if self.oldnode is None:
779 781 self.oldnode = self.repo[b'.'].node()
780 782
781 783 if self.repo.currentwlock() is None:
782 784 if util.safehasattr(self.repo, b'wlocknostateupdate'):
783 785 self._lock = self.repo.wlocknostateupdate()
784 786 else:
785 787 self._lock = self.repo.wlock()
786 788 self.need_leave = self._state(b'state-enter', hex(self.oldnode))
787 789 return self
788 790
789 791 def __exit__(self, type_, value, tb):
790 792 abort = True if type_ else False
791 793 self.exit(abort=abort)
792 794
793 795 def exit(self, abort=False):
794 796 try:
795 797 if self.need_leave:
796 798 status = b'failed' if abort else b'ok'
797 799 if self.newnode is None:
798 800 self.newnode = self.repo[b'.'].node()
799 801 if self.distance is None:
800 802 self.distance = calcdistance(
801 803 self.repo, self.oldnode, self.newnode
802 804 )
803 805 self._state(b'state-leave', hex(self.newnode), status=status)
804 806 finally:
805 807 self.need_leave = False
806 808 if self._lock:
807 809 self._lock.release()
808 810
809 811 def _state(self, cmd, commithash, status=b'ok'):
810 812 if not util.safehasattr(self.repo, b'_watchmanclient'):
811 813 return False
812 814 try:
813 815 self.repo._watchmanclient.command(
814 816 cmd,
815 817 {
816 818 b'name': self.name,
817 819 b'metadata': {
818 820 # the target revision
819 821 b'rev': commithash,
820 822 # approximate number of commits between current and target
821 823 b'distance': self.distance if self.distance else 0,
822 824 # success/failure (only really meaningful for state-leave)
823 825 b'status': status,
824 826 # whether the working copy parent is changing
825 827 b'partial': self.partial,
826 828 },
827 829 },
828 830 )
829 831 return True
830 832 except Exception as e:
831 833 # Swallow any errors; fire and forget
832 834 self.repo.ui.log(
833 835 b'watchman', b'Exception %s while running %s\n', e, cmd
834 836 )
835 837 return False
836 838
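As its docstring notes, state_update can be used as a context manager or driven manually through enter()/exit(). A minimal, hypothetical usage around a bulk working-copy operation (the operation name and body are placeholders):

    def run_bulk_operation(repo):
        # Bracket a bulk working-copy change with watchman state-enter /
        # state-leave notifications so subscribers can settle. oldnode,
        # newnode and distance are filled in automatically when left unset.
        with state_update(repo, name=b'example.bulk-operation', partial=False):
            pass  # ... perform the working-copy changes here ...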
837 839
838 840 # Estimate the distance between two nodes
839 841 def calcdistance(repo, oldnode, newnode):
840 842 anc = repo.changelog.ancestor(oldnode, newnode)
841 843 ancrev = repo[anc].rev()
842 844 distance = abs(repo[oldnode].rev() - ancrev) + abs(
843 845 repo[newnode].rev() - ancrev
844 846 )
845 847 return distance
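A quick worked example of this estimate, with hypothetical revision numbers:

    # oldnode at rev 12, newnode at rev 20, common ancestor at rev 10:
    old_rev, new_rev, anc_rev = 12, 20, 10
    distance = abs(old_rev - anc_rev) + abs(new_rev - anc_rev)
    assert distance == 12  # 2 hops back to the ancestor + 10 forward to the target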
846 848
847 849
848 850 # Bracket working copy updates with calls to the watchman state-enter
849 851 # and state-leave commands. This allows clients to perform more intelligent
850 852 # settling during bulk file change scenarios
851 853 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
852 854 def wrapupdate(
853 855 orig,
854 856 repo,
855 857 node,
856 858 branchmerge,
857 859 force,
858 860 ancestor=None,
859 861 mergeancestor=False,
860 862 labels=None,
861 863 matcher=None,
862 864 **kwargs
863 865 ):
864 866
865 867 distance = 0
866 868 partial = True
867 869 oldnode = repo[b'.'].node()
868 870 newnode = repo[node].node()
869 871 if matcher is None or matcher.always():
870 872 partial = False
871 873 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
872 874
873 875 with state_update(
874 876 repo,
875 877 name=b"hg.update",
876 878 oldnode=oldnode,
877 879 newnode=newnode,
878 880 distance=distance,
879 881 partial=partial,
880 882 ):
881 883 return orig(
882 884 repo,
883 885 node,
884 886 branchmerge,
885 887 force,
886 888 ancestor,
887 889 mergeancestor,
888 890 labels,
889 891 matcher,
890 892 **kwargs
891 893 )
892 894
893 895
894 896 def repo_has_depth_one_nested_repo(repo):
895 897 for f in repo.wvfs.listdir():
896 898 if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
897 899 msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
898 900 repo.ui.debug(msg % f)
899 901 return True
900 902 return False
901 903
902 904
903 905 def reposetup(ui, repo):
904 906 # We don't work with largefiles or inotify
905 907 exts = extensions.enabled()
906 908 for ext in _blacklist:
907 909 if ext in exts:
908 910 ui.warn(
909 911 _(
910 912 b'The fsmonitor extension is incompatible with the %s '
911 913 b'extension and has been disabled.\n'
912 914 )
913 915 % ext
914 916 )
915 917 return
916 918
917 919 if repo.local():
918 920 # We don't work with subrepos either.
919 921 #
920 922 # checking repo[None].substate can cause a dirstate parse, which is too
921 923 # slow. Instead, look for the .hgsubstate and .hgsub files directly.
922 924 if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
923 925 return
924 926
925 927 if repo_has_depth_one_nested_repo(repo):
926 928 return
927 929
928 930 fsmonitorstate = state.state(repo)
929 931 if fsmonitorstate.mode == b'off':
930 932 return
931 933
932 934 try:
933 935 client = watchmanclient.client(repo.ui, repo.root)
934 936 except Exception as ex:
935 937 _handleunavailable(ui, fsmonitorstate, ex)
936 938 return
937 939
938 940 repo._fsmonitorstate = fsmonitorstate
939 941 repo._watchmanclient = client
940 942
941 943 dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
942 944 if cached:
943 945 # at this point since fsmonitorstate wasn't present,
944 946 # repo.dirstate is not a fsmonitordirstate
945 947 makedirstate(repo, dirstate)
946 948
947 949 class fsmonitorrepo(repo.__class__):
948 950 def status(self, *args, **kwargs):
949 951 orig = super(fsmonitorrepo, self).status
950 952 return overridestatus(orig, self, *args, **kwargs)
951 953
952 954 def wlocknostateupdate(self, *args, **kwargs):
953 955 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
954 956
955 957 def wlock(self, *args, **kwargs):
956 958 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
957 959 if not ui.configbool(
958 960 b"experimental", b"fsmonitor.transaction_notify"
959 961 ):
960 962 return l
961 963 if l.held != 1:
962 964 return l
963 965 origrelease = l.releasefn
964 966
965 967 def staterelease():
966 968 if origrelease:
967 969 origrelease()
968 970 if l.stateupdate:
969 971 l.stateupdate.exit()
970 972 l.stateupdate = None
971 973
972 974 try:
973 975 l.stateupdate = None
974 976 l.stateupdate = state_update(self, name=b"hg.transaction")
975 977 l.stateupdate.enter()
976 978 l.releasefn = staterelease
977 979 except Exception as e:
978 980 # Swallow any errors; fire and forget
979 981 self.ui.log(
980 982 b'watchman', b'Exception in state update %s\n', e
981 983 )
982 984 return l
983 985
984 986 repo.__class__ = fsmonitorrepo
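The wlock wrapper above only brackets transactions with watchman notifications when the experimental knob is enabled. A hypothetical way to switch it on programmatically, using the standard ui.setconfig API (an `[experimental]` entry in hgrc achieves the same thing):

    def enable_transaction_notify(ui):
        # Illustrative only: opt in to the experimental transaction
        # state-enter/state-leave notifications (off by default).
        ui.setconfig(
            b'experimental',
            b'fsmonitor.transaction_notify',
            True,
            b'fsmonitor-example',
        )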
mercurial/configitems.py
@@ -1,1585 +1,1588 @@
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
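To make the match-versus-search trade-off above concrete, here is a small self-contained sketch using these classes; it mirrors the `br'hidden-command\..*'` generic item registered later in this file:

    from mercurial.configitems import configitem, itemregister

    reg = itemregister()
    item = configitem(b'help', br'hidden-command\..*', default=False, generic=True)
    reg[item.name] = item

    # The generic pattern is matched from the start of the key, so only keys
    # that really begin with "hidden-command." resolve to this item.
    assert reg.get(b'hidden-command.extdiff') is item
    assert reg.get(b'my-hidden-command.extdiff') is None  # re.search would have hit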
104 104
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section, configprefix + b'nodates', default=False,
137 137 )
138 138 coreconfigitem(
139 139 section, configprefix + b'showfunc', default=False,
140 140 )
141 141 coreconfigitem(
142 142 section, configprefix + b'unified', default=None,
143 143 )
144 144 coreconfigitem(
145 145 section, configprefix + b'git', default=False,
146 146 )
147 147 coreconfigitem(
148 148 section, configprefix + b'ignorews', default=False,
149 149 )
150 150 coreconfigitem(
151 151 section, configprefix + b'ignorewsamount', default=False,
152 152 )
153 153 coreconfigitem(
154 154 section, configprefix + b'ignoreblanklines', default=False,
155 155 )
156 156 coreconfigitem(
157 157 section, configprefix + b'ignorewseol', default=False,
158 158 )
159 159 coreconfigitem(
160 160 section, configprefix + b'nobinary', default=False,
161 161 )
162 162 coreconfigitem(
163 163 section, configprefix + b'noprefix', default=False,
164 164 )
165 165 coreconfigitem(
166 166 section, configprefix + b'word-diff', default=False,
167 167 )
168 168
169 169
170 170 coreconfigitem(
171 171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 172 )
173 173 coreconfigitem(
174 174 b'auth', b'cookiefile', default=None,
175 175 )
176 176 _registerdiffopts(section=b'annotate')
177 177 # bookmarks.pushing: internal hack for discovery
178 178 coreconfigitem(
179 179 b'bookmarks', b'pushing', default=list,
180 180 )
181 181 # bundle.mainreporoot: internal hack for bundlerepo
182 182 coreconfigitem(
183 183 b'bundle', b'mainreporoot', default=b'',
184 184 )
185 185 coreconfigitem(
186 186 b'censor', b'policy', default=b'abort', experimental=True,
187 187 )
188 188 coreconfigitem(
189 189 b'chgserver', b'idletimeout', default=3600,
190 190 )
191 191 coreconfigitem(
192 192 b'chgserver', b'skiphash', default=False,
193 193 )
194 194 coreconfigitem(
195 195 b'cmdserver', b'log', default=None,
196 196 )
197 197 coreconfigitem(
198 198 b'cmdserver', b'max-log-files', default=7,
199 199 )
200 200 coreconfigitem(
201 201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 202 )
203 203 coreconfigitem(
204 204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 205 )
206 206 coreconfigitem(
207 207 b'cmdserver', b'message-encodings', default=list,
208 208 )
209 209 coreconfigitem(
210 210 b'cmdserver',
211 211 b'track-log',
212 212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 213 )
214 214 coreconfigitem(
215 215 b'cmdserver', b'shutdown-on-interrupt', default=True,
216 216 )
217 217 coreconfigitem(
218 218 b'color', b'.*', default=None, generic=True,
219 219 )
220 220 coreconfigitem(
221 221 b'color', b'mode', default=b'auto',
222 222 )
223 223 coreconfigitem(
224 224 b'color', b'pagermode', default=dynamicdefault,
225 225 )
226 226 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
227 227 coreconfigitem(
228 228 b'commands', b'commit.post-status', default=False,
229 229 )
230 230 coreconfigitem(
231 231 b'commands', b'grep.all-files', default=False, experimental=True,
232 232 )
233 233 coreconfigitem(
234 234 b'commands', b'merge.require-rev', default=False,
235 235 )
236 236 coreconfigitem(
237 237 b'commands', b'push.require-revs', default=False,
238 238 )
239 239 coreconfigitem(
240 240 b'commands', b'resolve.confirm', default=False,
241 241 )
242 242 coreconfigitem(
243 243 b'commands', b'resolve.explicit-re-merge', default=False,
244 244 )
245 245 coreconfigitem(
246 246 b'commands', b'resolve.mark-check', default=b'none',
247 247 )
248 248 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
249 249 coreconfigitem(
250 250 b'commands', b'show.aliasprefix', default=list,
251 251 )
252 252 coreconfigitem(
253 253 b'commands', b'status.relative', default=False,
254 254 )
255 255 coreconfigitem(
256 256 b'commands', b'status.skipstates', default=[], experimental=True,
257 257 )
258 258 coreconfigitem(
259 259 b'commands', b'status.terse', default=b'',
260 260 )
261 261 coreconfigitem(
262 262 b'commands', b'status.verbose', default=False,
263 263 )
264 264 coreconfigitem(
265 265 b'commands', b'update.check', default=None,
266 266 )
267 267 coreconfigitem(
268 268 b'commands', b'update.requiredest', default=False,
269 269 )
270 270 coreconfigitem(
271 271 b'committemplate', b'.*', default=None, generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'convert', b'bzr.saverev', default=True,
275 275 )
276 276 coreconfigitem(
277 277 b'convert', b'cvsps.cache', default=True,
278 278 )
279 279 coreconfigitem(
280 280 b'convert', b'cvsps.fuzz', default=60,
281 281 )
282 282 coreconfigitem(
283 283 b'convert', b'cvsps.logencoding', default=None,
284 284 )
285 285 coreconfigitem(
286 286 b'convert', b'cvsps.mergefrom', default=None,
287 287 )
288 288 coreconfigitem(
289 289 b'convert', b'cvsps.mergeto', default=None,
290 290 )
291 291 coreconfigitem(
292 292 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
293 293 )
294 294 coreconfigitem(
295 295 b'convert', b'git.extrakeys', default=list,
296 296 )
297 297 coreconfigitem(
298 298 b'convert', b'git.findcopiesharder', default=False,
299 299 )
300 300 coreconfigitem(
301 301 b'convert', b'git.remoteprefix', default=b'remote',
302 302 )
303 303 coreconfigitem(
304 304 b'convert', b'git.renamelimit', default=400,
305 305 )
306 306 coreconfigitem(
307 307 b'convert', b'git.saverev', default=True,
308 308 )
309 309 coreconfigitem(
310 310 b'convert', b'git.similarity', default=50,
311 311 )
312 312 coreconfigitem(
313 313 b'convert', b'git.skipsubmodules', default=False,
314 314 )
315 315 coreconfigitem(
316 316 b'convert', b'hg.clonebranches', default=False,
317 317 )
318 318 coreconfigitem(
319 319 b'convert', b'hg.ignoreerrors', default=False,
320 320 )
321 321 coreconfigitem(
322 322 b'convert', b'hg.preserve-hash', default=False,
323 323 )
324 324 coreconfigitem(
325 325 b'convert', b'hg.revs', default=None,
326 326 )
327 327 coreconfigitem(
328 328 b'convert', b'hg.saverev', default=False,
329 329 )
330 330 coreconfigitem(
331 331 b'convert', b'hg.sourcename', default=None,
332 332 )
333 333 coreconfigitem(
334 334 b'convert', b'hg.startrev', default=None,
335 335 )
336 336 coreconfigitem(
337 337 b'convert', b'hg.tagsbranch', default=b'default',
338 338 )
339 339 coreconfigitem(
340 340 b'convert', b'hg.usebranchnames', default=True,
341 341 )
342 342 coreconfigitem(
343 343 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
344 344 )
345 345 coreconfigitem(
346 346 b'convert', b'localtimezone', default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'convert', b'p4.encoding', default=dynamicdefault,
350 350 )
351 351 coreconfigitem(
352 352 b'convert', b'p4.startrev', default=0,
353 353 )
354 354 coreconfigitem(
355 355 b'convert', b'skiptags', default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'convert', b'svn.debugsvnlog', default=True,
359 359 )
360 360 coreconfigitem(
361 361 b'convert', b'svn.trunk', default=None,
362 362 )
363 363 coreconfigitem(
364 364 b'convert', b'svn.tags', default=None,
365 365 )
366 366 coreconfigitem(
367 367 b'convert', b'svn.branches', default=None,
368 368 )
369 369 coreconfigitem(
370 370 b'convert', b'svn.startrev', default=0,
371 371 )
372 372 coreconfigitem(
373 373 b'debug', b'dirstate.delaywrite', default=0,
374 374 )
375 375 coreconfigitem(
376 376 b'defaults', b'.*', default=None, generic=True,
377 377 )
378 378 coreconfigitem(
379 379 b'devel', b'all-warnings', default=False,
380 380 )
381 381 coreconfigitem(
382 382 b'devel', b'bundle2.debug', default=False,
383 383 )
384 384 coreconfigitem(
385 385 b'devel', b'bundle.delta', default=b'',
386 386 )
387 387 coreconfigitem(
388 388 b'devel', b'cache-vfs', default=None,
389 389 )
390 390 coreconfigitem(
391 391 b'devel', b'check-locks', default=False,
392 392 )
393 393 coreconfigitem(
394 394 b'devel', b'check-relroot', default=False,
395 395 )
396 396 coreconfigitem(
397 397 b'devel', b'default-date', default=None,
398 398 )
399 399 coreconfigitem(
400 400 b'devel', b'deprec-warn', default=False,
401 401 )
402 402 coreconfigitem(
403 403 b'devel', b'disableloaddefaultcerts', default=False,
404 404 )
405 405 coreconfigitem(
406 406 b'devel', b'warn-empty-changegroup', default=False,
407 407 )
408 408 coreconfigitem(
409 409 b'devel', b'legacy.exchange', default=list,
410 410 )
411 411 coreconfigitem(
412 412 b'devel', b'persistent-nodemap', default=False,
413 413 )
414 414 coreconfigitem(
415 415 b'devel', b'servercafile', default=b'',
416 416 )
417 417 coreconfigitem(
418 418 b'devel', b'serverexactprotocol', default=b'',
419 419 )
420 420 coreconfigitem(
421 421 b'devel', b'serverrequirecert', default=False,
422 422 )
423 423 coreconfigitem(
424 424 b'devel', b'strip-obsmarkers', default=True,
425 425 )
426 426 coreconfigitem(
427 427 b'devel', b'warn-config', default=None,
428 428 )
429 429 coreconfigitem(
430 430 b'devel', b'warn-config-default', default=None,
431 431 )
432 432 coreconfigitem(
433 433 b'devel', b'user.obsmarker', default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'devel', b'warn-config-unknown', default=None,
437 437 )
438 438 coreconfigitem(
439 439 b'devel', b'debug.copies', default=False,
440 440 )
441 441 coreconfigitem(
442 442 b'devel', b'debug.extensions', default=False,
443 443 )
444 444 coreconfigitem(
445 445 b'devel', b'debug.repo-filters', default=False,
446 446 )
447 447 coreconfigitem(
448 448 b'devel', b'debug.peer-request', default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'devel', b'discovery.randomize', default=True,
452 452 )
453 453 _registerdiffopts(section=b'diff')
454 454 coreconfigitem(
455 455 b'email', b'bcc', default=None,
456 456 )
457 457 coreconfigitem(
458 458 b'email', b'cc', default=None,
459 459 )
460 460 coreconfigitem(
461 461 b'email', b'charsets', default=list,
462 462 )
463 463 coreconfigitem(
464 464 b'email', b'from', default=None,
465 465 )
466 466 coreconfigitem(
467 467 b'email', b'method', default=b'smtp',
468 468 )
469 469 coreconfigitem(
470 470 b'email', b'reply-to', default=None,
471 471 )
472 472 coreconfigitem(
473 473 b'email', b'to', default=None,
474 474 )
475 475 coreconfigitem(
476 476 b'experimental', b'archivemetatemplate', default=dynamicdefault,
477 477 )
478 478 coreconfigitem(
479 479 b'experimental', b'auto-publish', default=b'publish',
480 480 )
481 481 coreconfigitem(
482 482 b'experimental', b'bundle-phases', default=False,
483 483 )
484 484 coreconfigitem(
485 485 b'experimental', b'bundle2-advertise', default=True,
486 486 )
487 487 coreconfigitem(
488 488 b'experimental', b'bundle2-output-capture', default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'experimental', b'bundle2.pushback', default=False,
492 492 )
493 493 coreconfigitem(
494 494 b'experimental', b'bundle2lazylocking', default=False,
495 495 )
496 496 coreconfigitem(
497 497 b'experimental', b'bundlecomplevel', default=None,
498 498 )
499 499 coreconfigitem(
500 500 b'experimental', b'bundlecomplevel.bzip2', default=None,
501 501 )
502 502 coreconfigitem(
503 503 b'experimental', b'bundlecomplevel.gzip', default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'experimental', b'bundlecomplevel.none', default=None,
507 507 )
508 508 coreconfigitem(
509 509 b'experimental', b'bundlecomplevel.zstd', default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'experimental', b'changegroup3', default=False,
513 513 )
514 514 coreconfigitem(
515 515 b'experimental', b'cleanup-as-archived', default=False,
516 516 )
517 517 coreconfigitem(
518 518 b'experimental', b'clientcompressionengines', default=list,
519 519 )
520 520 coreconfigitem(
521 521 b'experimental', b'copytrace', default=b'on',
522 522 )
523 523 coreconfigitem(
524 524 b'experimental', b'copytrace.movecandidateslimit', default=100,
525 525 )
526 526 coreconfigitem(
527 527 b'experimental', b'copytrace.sourcecommitlimit', default=100,
528 528 )
529 529 coreconfigitem(
530 530 b'experimental', b'copies.read-from', default=b"filelog-only",
531 531 )
532 532 coreconfigitem(
533 533 b'experimental', b'copies.write-to', default=b'filelog-only',
534 534 )
535 535 coreconfigitem(
536 536 b'experimental', b'crecordtest', default=None,
537 537 )
538 538 coreconfigitem(
539 539 b'experimental', b'directaccess', default=False,
540 540 )
541 541 coreconfigitem(
542 542 b'experimental', b'directaccess.revnums', default=False,
543 543 )
544 544 coreconfigitem(
545 545 b'experimental', b'editortmpinhg', default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'experimental', b'evolution', default=list,
549 549 )
550 550 coreconfigitem(
551 551 b'experimental',
552 552 b'evolution.allowdivergence',
553 553 default=False,
554 554 alias=[(b'experimental', b'allowdivergence')],
555 555 )
556 556 coreconfigitem(
557 557 b'experimental', b'evolution.allowunstable', default=None,
558 558 )
559 559 coreconfigitem(
560 560 b'experimental', b'evolution.createmarkers', default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'experimental',
564 564 b'evolution.effect-flags',
565 565 default=True,
566 566 alias=[(b'experimental', b'effect-flags')],
567 567 )
568 568 coreconfigitem(
569 569 b'experimental', b'evolution.exchange', default=None,
570 570 )
571 571 coreconfigitem(
572 572 b'experimental', b'evolution.bundle-obsmarker', default=False,
573 573 )
574 574 coreconfigitem(
575 575 b'experimental', b'log.topo', default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'experimental', b'evolution.report-instabilities', default=True,
579 579 )
580 580 coreconfigitem(
581 581 b'experimental', b'evolution.track-operation', default=True,
582 582 )
583 583 # repo-level config to exclude a revset visibility
584 584 #
585 585 # The target use case is to use `share` to expose different subset of the same
586 586 # repository, especially server side. See also `server.view`.
587 587 coreconfigitem(
588 588 b'experimental', b'extra-filter-revs', default=None,
589 589 )
590 590 coreconfigitem(
591 591 b'experimental', b'maxdeltachainspan', default=-1,
592 592 )
593 593 coreconfigitem(
594 594 b'experimental', b'mergetempdirprefix', default=None,
595 595 )
596 596 coreconfigitem(
597 597 b'experimental', b'mmapindexthreshold', default=None,
598 598 )
599 599 coreconfigitem(
600 600 b'experimental', b'narrow', default=False,
601 601 )
602 602 coreconfigitem(
603 603 b'experimental', b'nonnormalparanoidcheck', default=False,
604 604 )
605 605 coreconfigitem(
606 606 b'experimental', b'exportableenviron', default=list,
607 607 )
608 608 coreconfigitem(
609 609 b'experimental', b'extendedheader.index', default=None,
610 610 )
611 611 coreconfigitem(
612 612 b'experimental', b'extendedheader.similarity', default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'experimental', b'graphshorten', default=False,
616 616 )
617 617 coreconfigitem(
618 618 b'experimental', b'graphstyle.parent', default=dynamicdefault,
619 619 )
620 620 coreconfigitem(
621 621 b'experimental', b'graphstyle.missing', default=dynamicdefault,
622 622 )
623 623 coreconfigitem(
624 624 b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
625 625 )
626 626 coreconfigitem(
627 627 b'experimental', b'hook-track-tags', default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'experimental', b'httppeer.advertise-v2', default=False,
631 631 )
632 632 coreconfigitem(
633 633 b'experimental', b'httppeer.v2-encoder-order', default=None,
634 634 )
635 635 coreconfigitem(
636 636 b'experimental', b'httppostargs', default=False,
637 637 )
638 638 coreconfigitem(
639 639 b'experimental', b'mergedriver', default=None,
640 640 )
641 641 coreconfigitem(b'experimental', b'nointerrupt', default=False)
642 642 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
643 643
644 644 coreconfigitem(
645 645 b'experimental', b'obsmarkers-exchange-debug', default=False,
646 646 )
647 647 coreconfigitem(
648 648 b'experimental', b'remotenames', default=False,
649 649 )
650 650 coreconfigitem(
651 651 b'experimental', b'removeemptydirs', default=True,
652 652 )
653 653 coreconfigitem(
654 654 b'experimental', b'revert.interactive.select-to-keep', default=False,
655 655 )
656 656 coreconfigitem(
657 657 b'experimental', b'revisions.prefixhexnode', default=False,
658 658 )
659 659 coreconfigitem(
660 660 b'experimental', b'revlogv2', default=None,
661 661 )
662 662 coreconfigitem(
663 663 b'experimental', b'revisions.disambiguatewithin', default=None,
664 664 )
665 665 coreconfigitem(
666 666 b'experimental', b'rust.index', default=False,
667 667 )
668 668 coreconfigitem(
669 669 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
670 670 )
671 671 coreconfigitem(
672 672 b'experimental',
673 673 b'server.manifestdata.recommended-batch-size',
674 674 default=100000,
675 675 )
676 676 coreconfigitem(
677 677 b'experimental', b'server.stream-narrow-clones', default=False,
678 678 )
679 679 coreconfigitem(
680 680 b'experimental', b'single-head-per-branch', default=False,
681 681 )
682 682 coreconfigitem(
683 683 b'experimental',
684 684 b'single-head-per-branch:account-closed-heads',
685 685 default=False,
686 686 )
687 687 coreconfigitem(
688 688 b'experimental', b'sshserver.support-v2', default=False,
689 689 )
690 690 coreconfigitem(
691 691 b'experimental', b'sparse-read', default=False,
692 692 )
693 693 coreconfigitem(
694 694 b'experimental', b'sparse-read.density-threshold', default=0.50,
695 695 )
696 696 coreconfigitem(
697 697 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
698 698 )
699 699 coreconfigitem(
700 700 b'experimental', b'treemanifest', default=False,
701 701 )
702 702 coreconfigitem(
703 703 b'experimental', b'update.atomic-file', default=False,
704 704 )
705 705 coreconfigitem(
706 706 b'experimental', b'sshpeer.advertise-v2', default=False,
707 707 )
708 708 coreconfigitem(
709 709 b'experimental', b'web.apiserver', default=False,
710 710 )
711 711 coreconfigitem(
712 712 b'experimental', b'web.api.http-v2', default=False,
713 713 )
714 714 coreconfigitem(
715 715 b'experimental', b'web.api.debugreflect', default=False,
716 716 )
717 717 coreconfigitem(
718 718 b'experimental', b'worker.wdir-get-thread-safe', default=False,
719 719 )
720 720 coreconfigitem(
721 721 b'experimental', b'worker.repository-upgrade', default=False,
722 722 )
723 723 coreconfigitem(
724 724 b'experimental', b'xdiff', default=False,
725 725 )
726 726 coreconfigitem(
727 727 b'extensions', b'.*', default=None, generic=True,
728 728 )
729 729 coreconfigitem(
730 730 b'extdata', b'.*', default=None, generic=True,
731 731 )
732 732 coreconfigitem(
733 733 b'format', b'bookmarks-in-store', default=False,
734 734 )
735 735 coreconfigitem(
736 736 b'format', b'chunkcachesize', default=None, experimental=True,
737 737 )
738 738 coreconfigitem(
739 739 b'format', b'dotencode', default=True,
740 740 )
741 741 coreconfigitem(
742 742 b'format', b'generaldelta', default=False, experimental=True,
743 743 )
744 744 coreconfigitem(
745 745 b'format', b'manifestcachesize', default=None, experimental=True,
746 746 )
747 747 coreconfigitem(
748 748 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
749 749 )
750 750 coreconfigitem(
751 751 b'format', b'obsstore-version', default=None,
752 752 )
753 753 coreconfigitem(
754 754 b'format', b'sparse-revlog', default=True,
755 755 )
756 756 coreconfigitem(
757 757 b'format',
758 758 b'revlog-compression',
759 759 default=lambda: [b'zlib'],
760 760 alias=[(b'experimental', b'format.compression')],
761 761 )
762 762 coreconfigitem(
763 763 b'format', b'usefncache', default=True,
764 764 )
765 765 coreconfigitem(
766 766 b'format', b'usegeneraldelta', default=True,
767 767 )
768 768 coreconfigitem(
769 769 b'format', b'usestore', default=True,
770 770 )
771 771 # Right now, the only efficient implementation of the nodemap logic is in Rust, so
772 772 # the persistent nodemap feature needs to stay experimental as long as the Rust
773 773 # extensions are an experimental feature.
774 774 coreconfigitem(
775 775 b'format', b'use-persistent-nodemap', default=False, experimental=True
776 776 )
777 777 coreconfigitem(
778 778 b'format',
779 779 b'exp-use-copies-side-data-changeset',
780 780 default=False,
781 781 experimental=True,
782 782 )
783 783 coreconfigitem(
784 784 b'format', b'exp-use-side-data', default=False, experimental=True,
785 785 )
786 786 coreconfigitem(
787 787 b'format', b'internal-phase', default=False, experimental=True,
788 788 )
789 789 coreconfigitem(
790 790 b'fsmonitor', b'warn_when_unused', default=True,
791 791 )
792 792 coreconfigitem(
793 793 b'fsmonitor', b'warn_update_file_count', default=50000,
794 794 )
795 795 coreconfigitem(
796 b'fsmonitor', b'warn_update_file_count_rust', default=400000,
797 )
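
The two thresholds above (50000 for the pure-Python status path, 400000 when the Rust extensions are in use) only matter together with a check of which path is active. The sketch below is a hypothetical, stand-alone restatement of that selection logic, not the fsmonitor extension's actual code; the function name and parameters are invented for illustration.

# Hypothetical sketch (not fsmonitor's real code): pick the warning threshold
# based on whether the faster Rust status path is in use, then decide whether
# to recommend enabling fsmonitor for this working-directory update.
def should_recommend_fsmonitor(
    file_count, rust_in_use, plain_threshold=50000, rust_threshold=400000
):
    """Return True when an update touches enough files that Watchman would
    likely help, using the larger threshold when Rust is in use."""
    threshold = rust_threshold if rust_in_use else plain_threshold
    return file_count >= threshold


if __name__ == '__main__':
    # 100k files: worth a warning on the pure-Python path, not with Rust.
    assert should_recommend_fsmonitor(100000, rust_in_use=False)
    assert not should_recommend_fsmonitor(100000, rust_in_use=True)
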
798 coreconfigitem(
796 799 b'help', br'hidden-command\..*', default=False, generic=True,
797 800 )
798 801 coreconfigitem(
799 802 b'help', br'hidden-topic\..*', default=False, generic=True,
800 803 )
801 804 coreconfigitem(
802 805 b'hooks', b'.*', default=dynamicdefault, generic=True,
803 806 )
804 807 coreconfigitem(
805 808 b'hgweb-paths', b'.*', default=list, generic=True,
806 809 )
807 810 coreconfigitem(
808 811 b'hostfingerprints', b'.*', default=list, generic=True,
809 812 )
810 813 coreconfigitem(
811 814 b'hostsecurity', b'ciphers', default=None,
812 815 )
813 816 coreconfigitem(
814 817 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
815 818 )
816 819 coreconfigitem(
817 820 b'hostsecurity',
818 821 b'.*:minimumprotocol$',
819 822 default=dynamicdefault,
820 823 generic=True,
821 824 )
822 825 coreconfigitem(
823 826 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
824 827 )
825 828 coreconfigitem(
826 829 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
827 830 )
828 831 coreconfigitem(
829 832 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
830 833 )
831 834
832 835 coreconfigitem(
833 836 b'http_proxy', b'always', default=False,
834 837 )
835 838 coreconfigitem(
836 839 b'http_proxy', b'host', default=None,
837 840 )
838 841 coreconfigitem(
839 842 b'http_proxy', b'no', default=list,
840 843 )
841 844 coreconfigitem(
842 845 b'http_proxy', b'passwd', default=None,
843 846 )
844 847 coreconfigitem(
845 848 b'http_proxy', b'user', default=None,
846 849 )
847 850
848 851 coreconfigitem(
849 852 b'http', b'timeout', default=None,
850 853 )
851 854
852 855 coreconfigitem(
853 856 b'logtoprocess', b'commandexception', default=None,
854 857 )
855 858 coreconfigitem(
856 859 b'logtoprocess', b'commandfinish', default=None,
857 860 )
858 861 coreconfigitem(
859 862 b'logtoprocess', b'command', default=None,
860 863 )
861 864 coreconfigitem(
862 865 b'logtoprocess', b'develwarn', default=None,
863 866 )
864 867 coreconfigitem(
865 868 b'logtoprocess', b'uiblocked', default=None,
866 869 )
867 870 coreconfigitem(
868 871 b'merge', b'checkunknown', default=b'abort',
869 872 )
870 873 coreconfigitem(
871 874 b'merge', b'checkignored', default=b'abort',
872 875 )
873 876 coreconfigitem(
874 877 b'experimental', b'merge.checkpathconflicts', default=False,
875 878 )
876 879 coreconfigitem(
877 880 b'merge', b'followcopies', default=True,
878 881 )
879 882 coreconfigitem(
880 883 b'merge', b'on-failure', default=b'continue',
881 884 )
882 885 coreconfigitem(
883 886 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
884 887 )
885 888 coreconfigitem(
886 889 b'merge', b'strict-capability-check', default=False,
887 890 )
888 891 coreconfigitem(
889 892 b'merge-tools', b'.*', default=None, generic=True,
890 893 )
891 894 coreconfigitem(
892 895 b'merge-tools',
893 896 br'.*\.args$',
894 897 default=b"$local $base $other",
895 898 generic=True,
896 899 priority=-1,
897 900 )
898 901 coreconfigitem(
899 902 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
900 903 )
901 904 coreconfigitem(
902 905 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
903 906 )
904 907 coreconfigitem(
905 908 b'merge-tools',
906 909 br'.*\.checkchanged$',
907 910 default=False,
908 911 generic=True,
909 912 priority=-1,
910 913 )
911 914 coreconfigitem(
912 915 b'merge-tools',
913 916 br'.*\.executable$',
914 917 default=dynamicdefault,
915 918 generic=True,
916 919 priority=-1,
917 920 )
918 921 coreconfigitem(
919 922 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
920 923 )
921 924 coreconfigitem(
922 925 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
923 926 )
924 927 coreconfigitem(
925 928 b'merge-tools',
926 929 br'.*\.mergemarkers$',
927 930 default=b'basic',
928 931 generic=True,
929 932 priority=-1,
930 933 )
931 934 coreconfigitem(
932 935 b'merge-tools',
933 936 br'.*\.mergemarkertemplate$',
934 937 default=dynamicdefault, # take from ui.mergemarkertemplate
935 938 generic=True,
936 939 priority=-1,
937 940 )
938 941 coreconfigitem(
939 942 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
940 943 )
941 944 coreconfigitem(
942 945 b'merge-tools',
943 946 br'.*\.premerge$',
944 947 default=dynamicdefault,
945 948 generic=True,
946 949 priority=-1,
947 950 )
948 951 coreconfigitem(
949 952 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
950 953 )
951 954 coreconfigitem(
952 955 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
953 956 )
954 957 coreconfigitem(
955 958 b'pager', b'ignore', default=list,
956 959 )
957 960 coreconfigitem(
958 961 b'pager', b'pager', default=dynamicdefault,
959 962 )
960 963 coreconfigitem(
961 964 b'patch', b'eol', default=b'strict',
962 965 )
963 966 coreconfigitem(
964 967 b'patch', b'fuzz', default=2,
965 968 )
966 969 coreconfigitem(
967 970 b'paths', b'default', default=None,
968 971 )
969 972 coreconfigitem(
970 973 b'paths', b'default-push', default=None,
971 974 )
972 975 coreconfigitem(
973 976 b'paths', b'.*', default=None, generic=True,
974 977 )
975 978 coreconfigitem(
976 979 b'phases', b'checksubrepos', default=b'follow',
977 980 )
978 981 coreconfigitem(
979 982 b'phases', b'new-commit', default=b'draft',
980 983 )
981 984 coreconfigitem(
982 985 b'phases', b'publish', default=True,
983 986 )
984 987 coreconfigitem(
985 988 b'profiling', b'enabled', default=False,
986 989 )
987 990 coreconfigitem(
988 991 b'profiling', b'format', default=b'text',
989 992 )
990 993 coreconfigitem(
991 994 b'profiling', b'freq', default=1000,
992 995 )
993 996 coreconfigitem(
994 997 b'profiling', b'limit', default=30,
995 998 )
996 999 coreconfigitem(
997 1000 b'profiling', b'nested', default=0,
998 1001 )
999 1002 coreconfigitem(
1000 1003 b'profiling', b'output', default=None,
1001 1004 )
1002 1005 coreconfigitem(
1003 1006 b'profiling', b'showmax', default=0.999,
1004 1007 )
1005 1008 coreconfigitem(
1006 1009 b'profiling', b'showmin', default=dynamicdefault,
1007 1010 )
1008 1011 coreconfigitem(
1009 1012 b'profiling', b'showtime', default=True,
1010 1013 )
1011 1014 coreconfigitem(
1012 1015 b'profiling', b'sort', default=b'inlinetime',
1013 1016 )
1014 1017 coreconfigitem(
1015 1018 b'profiling', b'statformat', default=b'hotpath',
1016 1019 )
1017 1020 coreconfigitem(
1018 1021 b'profiling', b'time-track', default=dynamicdefault,
1019 1022 )
1020 1023 coreconfigitem(
1021 1024 b'profiling', b'type', default=b'stat',
1022 1025 )
1023 1026 coreconfigitem(
1024 1027 b'progress', b'assume-tty', default=False,
1025 1028 )
1026 1029 coreconfigitem(
1027 1030 b'progress', b'changedelay', default=1,
1028 1031 )
1029 1032 coreconfigitem(
1030 1033 b'progress', b'clear-complete', default=True,
1031 1034 )
1032 1035 coreconfigitem(
1033 1036 b'progress', b'debug', default=False,
1034 1037 )
1035 1038 coreconfigitem(
1036 1039 b'progress', b'delay', default=3,
1037 1040 )
1038 1041 coreconfigitem(
1039 1042 b'progress', b'disable', default=False,
1040 1043 )
1041 1044 coreconfigitem(
1042 1045 b'progress', b'estimateinterval', default=60.0,
1043 1046 )
1044 1047 coreconfigitem(
1045 1048 b'progress',
1046 1049 b'format',
1047 1050 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1048 1051 )
1049 1052 coreconfigitem(
1050 1053 b'progress', b'refresh', default=0.1,
1051 1054 )
1052 1055 coreconfigitem(
1053 1056 b'progress', b'width', default=dynamicdefault,
1054 1057 )
1055 1058 coreconfigitem(
1056 1059 b'pull', b'confirm', default=False,
1057 1060 )
1058 1061 coreconfigitem(
1059 1062 b'push', b'pushvars.server', default=False,
1060 1063 )
1061 1064 coreconfigitem(
1062 1065 b'rewrite',
1063 1066 b'backup-bundle',
1064 1067 default=True,
1065 1068 alias=[(b'ui', b'history-editing-backup')],
1066 1069 )
1067 1070 coreconfigitem(
1068 1071 b'rewrite', b'update-timestamp', default=False,
1069 1072 )
1070 1073 coreconfigitem(
1071 1074 b'rewrite', b'empty-successor', default=b'skip', experimental=True,
1072 1075 )
1073 1076 coreconfigitem(
1074 1077 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1075 1078 )
1076 1079 coreconfigitem(
1077 1080 b'storage',
1078 1081 b'revlog.optimize-delta-parent-choice',
1079 1082 default=True,
1080 1083 alias=[(b'format', b'aggressivemergedeltas')],
1081 1084 )
1082 1085 # experimental as long as rust is experimental (or a C version is implemented)
1083 1086 coreconfigitem(
1084 1087 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1085 1088 )
1086 1089 # experimental as long as format.use-persistent-nodemap is.
1087 1090 coreconfigitem(
1088 1091 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1089 1092 )
1090 1093 coreconfigitem(
1091 1094 b'storage', b'revlog.reuse-external-delta', default=True,
1092 1095 )
1093 1096 coreconfigitem(
1094 1097 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1095 1098 )
1096 1099 coreconfigitem(
1097 1100 b'storage', b'revlog.zlib.level', default=None,
1098 1101 )
1099 1102 coreconfigitem(
1100 1103 b'storage', b'revlog.zstd.level', default=None,
1101 1104 )
1102 1105 coreconfigitem(
1103 1106 b'server', b'bookmarks-pushkey-compat', default=True,
1104 1107 )
1105 1108 coreconfigitem(
1106 1109 b'server', b'bundle1', default=True,
1107 1110 )
1108 1111 coreconfigitem(
1109 1112 b'server', b'bundle1gd', default=None,
1110 1113 )
1111 1114 coreconfigitem(
1112 1115 b'server', b'bundle1.pull', default=None,
1113 1116 )
1114 1117 coreconfigitem(
1115 1118 b'server', b'bundle1gd.pull', default=None,
1116 1119 )
1117 1120 coreconfigitem(
1118 1121 b'server', b'bundle1.push', default=None,
1119 1122 )
1120 1123 coreconfigitem(
1121 1124 b'server', b'bundle1gd.push', default=None,
1122 1125 )
1123 1126 coreconfigitem(
1124 1127 b'server',
1125 1128 b'bundle2.stream',
1126 1129 default=True,
1127 1130 alias=[(b'experimental', b'bundle2.stream')],
1128 1131 )
1129 1132 coreconfigitem(
1130 1133 b'server', b'compressionengines', default=list,
1131 1134 )
1132 1135 coreconfigitem(
1133 1136 b'server', b'concurrent-push-mode', default=b'check-related',
1134 1137 )
1135 1138 coreconfigitem(
1136 1139 b'server', b'disablefullbundle', default=False,
1137 1140 )
1138 1141 coreconfigitem(
1139 1142 b'server', b'maxhttpheaderlen', default=1024,
1140 1143 )
1141 1144 coreconfigitem(
1142 1145 b'server', b'pullbundle', default=False,
1143 1146 )
1144 1147 coreconfigitem(
1145 1148 b'server', b'preferuncompressed', default=False,
1146 1149 )
1147 1150 coreconfigitem(
1148 1151 b'server', b'streamunbundle', default=False,
1149 1152 )
1150 1153 coreconfigitem(
1151 1154 b'server', b'uncompressed', default=True,
1152 1155 )
1153 1156 coreconfigitem(
1154 1157 b'server', b'uncompressedallowsecret', default=False,
1155 1158 )
1156 1159 coreconfigitem(
1157 1160 b'server', b'view', default=b'served',
1158 1161 )
1159 1162 coreconfigitem(
1160 1163 b'server', b'validate', default=False,
1161 1164 )
1162 1165 coreconfigitem(
1163 1166 b'server', b'zliblevel', default=-1,
1164 1167 )
1165 1168 coreconfigitem(
1166 1169 b'server', b'zstdlevel', default=3,
1167 1170 )
1168 1171 coreconfigitem(
1169 1172 b'share', b'pool', default=None,
1170 1173 )
1171 1174 coreconfigitem(
1172 1175 b'share', b'poolnaming', default=b'identity',
1173 1176 )
1174 1177 coreconfigitem(
1175 1178 b'shelve', b'maxbackups', default=10,
1176 1179 )
1177 1180 coreconfigitem(
1178 1181 b'smtp', b'host', default=None,
1179 1182 )
1180 1183 coreconfigitem(
1181 1184 b'smtp', b'local_hostname', default=None,
1182 1185 )
1183 1186 coreconfigitem(
1184 1187 b'smtp', b'password', default=None,
1185 1188 )
1186 1189 coreconfigitem(
1187 1190 b'smtp', b'port', default=dynamicdefault,
1188 1191 )
1189 1192 coreconfigitem(
1190 1193 b'smtp', b'tls', default=b'none',
1191 1194 )
1192 1195 coreconfigitem(
1193 1196 b'smtp', b'username', default=None,
1194 1197 )
1195 1198 coreconfigitem(
1196 1199 b'sparse', b'missingwarning', default=True, experimental=True,
1197 1200 )
1198 1201 coreconfigitem(
1199 1202 b'subrepos',
1200 1203 b'allowed',
1201 1204 default=dynamicdefault, # to make backporting simpler
1202 1205 )
1203 1206 coreconfigitem(
1204 1207 b'subrepos', b'hg:allowed', default=dynamicdefault,
1205 1208 )
1206 1209 coreconfigitem(
1207 1210 b'subrepos', b'git:allowed', default=dynamicdefault,
1208 1211 )
1209 1212 coreconfigitem(
1210 1213 b'subrepos', b'svn:allowed', default=dynamicdefault,
1211 1214 )
1212 1215 coreconfigitem(
1213 1216 b'templates', b'.*', default=None, generic=True,
1214 1217 )
1215 1218 coreconfigitem(
1216 1219 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1217 1220 )
1218 1221 coreconfigitem(
1219 1222 b'trusted', b'groups', default=list,
1220 1223 )
1221 1224 coreconfigitem(
1222 1225 b'trusted', b'users', default=list,
1223 1226 )
1224 1227 coreconfigitem(
1225 1228 b'ui', b'_usedassubrepo', default=False,
1226 1229 )
1227 1230 coreconfigitem(
1228 1231 b'ui', b'allowemptycommit', default=False,
1229 1232 )
1230 1233 coreconfigitem(
1231 1234 b'ui', b'archivemeta', default=True,
1232 1235 )
1233 1236 coreconfigitem(
1234 1237 b'ui', b'askusername', default=False,
1235 1238 )
1236 1239 coreconfigitem(
1237 1240 b'ui', b'available-memory', default=None,
1238 1241 )
1239 1242
1240 1243 coreconfigitem(
1241 1244 b'ui', b'clonebundlefallback', default=False,
1242 1245 )
1243 1246 coreconfigitem(
1244 1247 b'ui', b'clonebundleprefers', default=list,
1245 1248 )
1246 1249 coreconfigitem(
1247 1250 b'ui', b'clonebundles', default=True,
1248 1251 )
1249 1252 coreconfigitem(
1250 1253 b'ui', b'color', default=b'auto',
1251 1254 )
1252 1255 coreconfigitem(
1253 1256 b'ui', b'commitsubrepos', default=False,
1254 1257 )
1255 1258 coreconfigitem(
1256 1259 b'ui', b'debug', default=False,
1257 1260 )
1258 1261 coreconfigitem(
1259 1262 b'ui', b'debugger', default=None,
1260 1263 )
1261 1264 coreconfigitem(
1262 1265 b'ui', b'editor', default=dynamicdefault,
1263 1266 )
1264 1267 coreconfigitem(
1265 1268 b'ui', b'fallbackencoding', default=None,
1266 1269 )
1267 1270 coreconfigitem(
1268 1271 b'ui', b'forcecwd', default=None,
1269 1272 )
1270 1273 coreconfigitem(
1271 1274 b'ui', b'forcemerge', default=None,
1272 1275 )
1273 1276 coreconfigitem(
1274 1277 b'ui', b'formatdebug', default=False,
1275 1278 )
1276 1279 coreconfigitem(
1277 1280 b'ui', b'formatjson', default=False,
1278 1281 )
1279 1282 coreconfigitem(
1280 1283 b'ui', b'formatted', default=None,
1281 1284 )
1282 1285 coreconfigitem(
1283 1286 b'ui', b'graphnodetemplate', default=None,
1284 1287 )
1285 1288 coreconfigitem(
1286 1289 b'ui', b'interactive', default=None,
1287 1290 )
1288 1291 coreconfigitem(
1289 1292 b'ui', b'interface', default=None,
1290 1293 )
1291 1294 coreconfigitem(
1292 1295 b'ui', b'interface.chunkselector', default=None,
1293 1296 )
1294 1297 coreconfigitem(
1295 1298 b'ui', b'large-file-limit', default=10000000,
1296 1299 )
1297 1300 coreconfigitem(
1298 1301 b'ui', b'logblockedtimes', default=False,
1299 1302 )
1300 1303 coreconfigitem(
1301 1304 b'ui', b'logtemplate', default=None,
1302 1305 )
1303 1306 coreconfigitem(
1304 1307 b'ui', b'merge', default=None,
1305 1308 )
1306 1309 coreconfigitem(
1307 1310 b'ui', b'mergemarkers', default=b'basic',
1308 1311 )
1309 1312 coreconfigitem(
1310 1313 b'ui',
1311 1314 b'mergemarkertemplate',
1312 1315 default=(
1313 1316 b'{node|short} '
1314 1317 b'{ifeq(tags, "tip", "", '
1315 1318 b'ifeq(tags, "", "", "{tags} "))}'
1316 1319 b'{if(bookmarks, "{bookmarks} ")}'
1317 1320 b'{ifeq(branch, "default", "", "{branch} ")}'
1318 1321 b'- {author|user}: {desc|firstline}'
1319 1322 ),
1320 1323 )
1321 1324 coreconfigitem(
1322 1325 b'ui', b'message-output', default=b'stdio',
1323 1326 )
1324 1327 coreconfigitem(
1325 1328 b'ui', b'nontty', default=False,
1326 1329 )
1327 1330 coreconfigitem(
1328 1331 b'ui', b'origbackuppath', default=None,
1329 1332 )
1330 1333 coreconfigitem(
1331 1334 b'ui', b'paginate', default=True,
1332 1335 )
1333 1336 coreconfigitem(
1334 1337 b'ui', b'patch', default=None,
1335 1338 )
1336 1339 coreconfigitem(
1337 1340 b'ui', b'pre-merge-tool-output-template', default=None,
1338 1341 )
1339 1342 coreconfigitem(
1340 1343 b'ui', b'portablefilenames', default=b'warn',
1341 1344 )
1342 1345 coreconfigitem(
1343 1346 b'ui', b'promptecho', default=False,
1344 1347 )
1345 1348 coreconfigitem(
1346 1349 b'ui', b'quiet', default=False,
1347 1350 )
1348 1351 coreconfigitem(
1349 1352 b'ui', b'quietbookmarkmove', default=False,
1350 1353 )
1351 1354 coreconfigitem(
1352 1355 b'ui', b'relative-paths', default=b'legacy',
1353 1356 )
1354 1357 coreconfigitem(
1355 1358 b'ui', b'remotecmd', default=b'hg',
1356 1359 )
1357 1360 coreconfigitem(
1358 1361 b'ui', b'report_untrusted', default=True,
1359 1362 )
1360 1363 coreconfigitem(
1361 1364 b'ui', b'rollback', default=True,
1362 1365 )
1363 1366 coreconfigitem(
1364 1367 b'ui', b'signal-safe-lock', default=True,
1365 1368 )
1366 1369 coreconfigitem(
1367 1370 b'ui', b'slash', default=False,
1368 1371 )
1369 1372 coreconfigitem(
1370 1373 b'ui', b'ssh', default=b'ssh',
1371 1374 )
1372 1375 coreconfigitem(
1373 1376 b'ui', b'ssherrorhint', default=None,
1374 1377 )
1375 1378 coreconfigitem(
1376 1379 b'ui', b'statuscopies', default=False,
1377 1380 )
1378 1381 coreconfigitem(
1379 1382 b'ui', b'strict', default=False,
1380 1383 )
1381 1384 coreconfigitem(
1382 1385 b'ui', b'style', default=b'',
1383 1386 )
1384 1387 coreconfigitem(
1385 1388 b'ui', b'supportcontact', default=None,
1386 1389 )
1387 1390 coreconfigitem(
1388 1391 b'ui', b'textwidth', default=78,
1389 1392 )
1390 1393 coreconfigitem(
1391 1394 b'ui', b'timeout', default=b'600',
1392 1395 )
1393 1396 coreconfigitem(
1394 1397 b'ui', b'timeout.warn', default=0,
1395 1398 )
1396 1399 coreconfigitem(
1397 1400 b'ui', b'timestamp-output', default=False,
1398 1401 )
1399 1402 coreconfigitem(
1400 1403 b'ui', b'traceback', default=False,
1401 1404 )
1402 1405 coreconfigitem(
1403 1406 b'ui', b'tweakdefaults', default=False,
1404 1407 )
1405 1408 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1406 1409 coreconfigitem(
1407 1410 b'ui', b'verbose', default=False,
1408 1411 )
1409 1412 coreconfigitem(
1410 1413 b'verify', b'skipflags', default=None,
1411 1414 )
1412 1415 coreconfigitem(
1413 1416 b'web', b'allowbz2', default=False,
1414 1417 )
1415 1418 coreconfigitem(
1416 1419 b'web', b'allowgz', default=False,
1417 1420 )
1418 1421 coreconfigitem(
1419 1422 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1420 1423 )
1421 1424 coreconfigitem(
1422 1425 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1423 1426 )
1424 1427 coreconfigitem(
1425 1428 b'web', b'allowzip', default=False,
1426 1429 )
1427 1430 coreconfigitem(
1428 1431 b'web', b'archivesubrepos', default=False,
1429 1432 )
1430 1433 coreconfigitem(
1431 1434 b'web', b'cache', default=True,
1432 1435 )
1433 1436 coreconfigitem(
1434 1437 b'web', b'comparisoncontext', default=5,
1435 1438 )
1436 1439 coreconfigitem(
1437 1440 b'web', b'contact', default=None,
1438 1441 )
1439 1442 coreconfigitem(
1440 1443 b'web', b'deny_push', default=list,
1441 1444 )
1442 1445 coreconfigitem(
1443 1446 b'web', b'guessmime', default=False,
1444 1447 )
1445 1448 coreconfigitem(
1446 1449 b'web', b'hidden', default=False,
1447 1450 )
1448 1451 coreconfigitem(
1449 1452 b'web', b'labels', default=list,
1450 1453 )
1451 1454 coreconfigitem(
1452 1455 b'web', b'logoimg', default=b'hglogo.png',
1453 1456 )
1454 1457 coreconfigitem(
1455 1458 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1456 1459 )
1457 1460 coreconfigitem(
1458 1461 b'web', b'accesslog', default=b'-',
1459 1462 )
1460 1463 coreconfigitem(
1461 1464 b'web', b'address', default=b'',
1462 1465 )
1463 1466 coreconfigitem(
1464 1467 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1465 1468 )
1466 1469 coreconfigitem(
1467 1470 b'web', b'allow_read', default=list,
1468 1471 )
1469 1472 coreconfigitem(
1470 1473 b'web', b'baseurl', default=None,
1471 1474 )
1472 1475 coreconfigitem(
1473 1476 b'web', b'cacerts', default=None,
1474 1477 )
1475 1478 coreconfigitem(
1476 1479 b'web', b'certificate', default=None,
1477 1480 )
1478 1481 coreconfigitem(
1479 1482 b'web', b'collapse', default=False,
1480 1483 )
1481 1484 coreconfigitem(
1482 1485 b'web', b'csp', default=None,
1483 1486 )
1484 1487 coreconfigitem(
1485 1488 b'web', b'deny_read', default=list,
1486 1489 )
1487 1490 coreconfigitem(
1488 1491 b'web', b'descend', default=True,
1489 1492 )
1490 1493 coreconfigitem(
1491 1494 b'web', b'description', default=b"",
1492 1495 )
1493 1496 coreconfigitem(
1494 1497 b'web', b'encoding', default=lambda: encoding.encoding,
1495 1498 )
1496 1499 coreconfigitem(
1497 1500 b'web', b'errorlog', default=b'-',
1498 1501 )
1499 1502 coreconfigitem(
1500 1503 b'web', b'ipv6', default=False,
1501 1504 )
1502 1505 coreconfigitem(
1503 1506 b'web', b'maxchanges', default=10,
1504 1507 )
1505 1508 coreconfigitem(
1506 1509 b'web', b'maxfiles', default=10,
1507 1510 )
1508 1511 coreconfigitem(
1509 1512 b'web', b'maxshortchanges', default=60,
1510 1513 )
1511 1514 coreconfigitem(
1512 1515 b'web', b'motd', default=b'',
1513 1516 )
1514 1517 coreconfigitem(
1515 1518 b'web', b'name', default=dynamicdefault,
1516 1519 )
1517 1520 coreconfigitem(
1518 1521 b'web', b'port', default=8000,
1519 1522 )
1520 1523 coreconfigitem(
1521 1524 b'web', b'prefix', default=b'',
1522 1525 )
1523 1526 coreconfigitem(
1524 1527 b'web', b'push_ssl', default=True,
1525 1528 )
1526 1529 coreconfigitem(
1527 1530 b'web', b'refreshinterval', default=20,
1528 1531 )
1529 1532 coreconfigitem(
1530 1533 b'web', b'server-header', default=None,
1531 1534 )
1532 1535 coreconfigitem(
1533 1536 b'web', b'static', default=None,
1534 1537 )
1535 1538 coreconfigitem(
1536 1539 b'web', b'staticurl', default=None,
1537 1540 )
1538 1541 coreconfigitem(
1539 1542 b'web', b'stripes', default=1,
1540 1543 )
1541 1544 coreconfigitem(
1542 1545 b'web', b'style', default=b'paper',
1543 1546 )
1544 1547 coreconfigitem(
1545 1548 b'web', b'templates', default=None,
1546 1549 )
1547 1550 coreconfigitem(
1548 1551 b'web', b'view', default=b'served', experimental=True,
1549 1552 )
1550 1553 coreconfigitem(
1551 1554 b'worker', b'backgroundclose', default=dynamicdefault,
1552 1555 )
1553 1556 # Windows defaults to a limit of 512 open files. A buffer of 128
1554 1557 # should give us enough headway.
1555 1558 coreconfigitem(
1556 1559 b'worker', b'backgroundclosemaxqueue', default=384,
1557 1560 )
1558 1561 coreconfigitem(
1559 1562 b'worker', b'backgroundcloseminfilecount', default=2048,
1560 1563 )
1561 1564 coreconfigitem(
1562 1565 b'worker', b'backgroundclosethreadcount', default=4,
1563 1566 )
1564 1567 coreconfigitem(
1565 1568 b'worker', b'enabled', default=True,
1566 1569 )
1567 1570 coreconfigitem(
1568 1571 b'worker', b'numcpus', default=None,
1569 1572 )
1570 1573
1571 1574 # Rebase related configuration moved to core because other extensions are doing
1572 1575 # strange things. For example, shelve imports the extension to reuse some bits
1573 1576 # without formally loading it.
1574 1577 coreconfigitem(
1575 1578 b'commands', b'rebase.requiredest', default=False,
1576 1579 )
1577 1580 coreconfigitem(
1578 1581 b'experimental', b'rebaseskipobsolete', default=True,
1579 1582 )
1580 1583 coreconfigitem(
1581 1584 b'rebase', b'singletransaction', default=False,
1582 1585 )
1583 1586 coreconfigitem(
1584 1587 b'rebase', b'experimental.inmemory', default=False,
1585 1588 )
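
Many of the items registered above carry a `default` plus an `alias` list (for example `rewrite.backup-bundle` falling back to the older `ui.history-editing-backup` name). The following is a minimal sketch of the lookup order this implies, with hypothetical names and none of Mercurial's actual API: explicit value first, then any aliased old name, then the registered default.

# Minimal sketch of a config registry with registered defaults and aliases.
# All names here are illustrative; the real implementation (configitems.py /
# ui.py) also handles generic items, experimental flags, dynamic defaults, ...
class Registry(object):
    def __init__(self):
        self._items = {}  # (section, name) -> (default, aliases)

    def register(self, section, name, default=None, alias=()):
        self._items[(section, name)] = (default, tuple(alias))

    def lookup(self, userconfig, section, name):
        default, aliases = self._items[(section, name)]
        for key in ((section, name),) + aliases:
            if key in userconfig:
                return userconfig[key]
        # a callable models lazy defaults such as ``default=list``
        return default() if callable(default) else default


registry = Registry()
registry.register(
    'rewrite', 'backup-bundle', default=True,
    alias=[('ui', 'history-editing-backup')],
)

userconfig = {('ui', 'history-editing-backup'): False}
assert registry.lookup(userconfig, 'rewrite', 'backup-bundle') is False
assert registry.lookup({}, 'rewrite', 'backup-bundle') is True
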
@@ -1,2274 +1,2283 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import stat
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 modifiednodeid,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from .thirdparty import attr
23 23 from . import (
24 24 copies,
25 25 encoding,
26 26 error,
27 27 filemerge,
28 28 match as matchmod,
29 29 mergestate as mergestatemod,
30 30 obsutil,
31 31 pathutil,
32 32 pycompat,
33 33 scmutil,
34 34 subrepoutil,
35 35 util,
36 36 worker,
37 37 )
38 38
39 39 _pack = struct.pack
40 40 _unpack = struct.unpack
41 41
42 42
43 43 def _getcheckunknownconfig(repo, section, name):
44 44 config = repo.ui.config(section, name)
45 45 valid = [b'abort', b'ignore', b'warn']
46 46 if config not in valid:
47 47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 48 raise error.ConfigError(
49 49 _(b"%s.%s not valid ('%s' is none of %s)")
50 50 % (section, name, config, validstr)
51 51 )
52 52 return config
53 53
54 54
55 55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 56 if wctx.isinmemory():
57 57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 58 # unknown file.
59 59 #
60 60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 61 # because that function does other useful work.
62 62 return False
63 63
64 64 if f2 is None:
65 65 f2 = f
66 66 return (
67 67 repo.wvfs.audit.check(f)
68 68 and repo.wvfs.isfileorlink(f)
69 69 and repo.dirstate.normalize(f) not in repo.dirstate
70 70 and mctx[f2].cmp(wctx[f])
71 71 )
72 72
73 73
74 74 class _unknowndirschecker(object):
75 75 """
76 76 Look for any unknown files or directories that may have a path conflict
77 77 with a file. If any path prefix of the file exists as a file or link,
78 78 then it conflicts. If the file itself is a directory that contains any
79 79 file that is not tracked, then it conflicts.
80 80
81 81 Returns the shortest path at which a conflict occurs, or None if there is
82 82 no conflict.
83 83 """
84 84
85 85 def __init__(self):
86 86 # A set of paths known to be good. This prevents repeated checking of
87 87 # dirs. It will be updated with any new dirs that are checked and found
88 88 # to be safe.
89 89 self._unknowndircache = set()
90 90
91 91 # A set of paths that are known to be absent. This prevents repeated
92 92 # checking of subdirectories that are known not to exist. It will be
93 93 # updated with any new dirs that are checked and found to be absent.
94 94 self._missingdircache = set()
95 95
96 96 def __call__(self, repo, wctx, f):
97 97 if wctx.isinmemory():
98 98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 99 return False
100 100
101 101 # Check for path prefixes that exist as unknown files.
102 102 for p in reversed(list(pathutil.finddirs(f))):
103 103 if p in self._missingdircache:
104 104 return
105 105 if p in self._unknowndircache:
106 106 continue
107 107 if repo.wvfs.audit.check(p):
108 108 if (
109 109 repo.wvfs.isfileorlink(p)
110 110 and repo.dirstate.normalize(p) not in repo.dirstate
111 111 ):
112 112 return p
113 113 if not repo.wvfs.lexists(p):
114 114 self._missingdircache.add(p)
115 115 return
116 116 self._unknowndircache.add(p)
117 117
118 118 # Check if the file conflicts with a directory containing unknown files.
119 119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 120 # Does the directory contain any files that are not in the dirstate?
121 121 for p, dirs, files in repo.wvfs.walk(f):
122 122 for fn in files:
123 123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 124 relf = repo.dirstate.normalize(relf, isknown=True)
125 125 if relf not in repo.dirstate:
126 126 return f
127 127 return None
128 128
129 129
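
The checker above walks every path prefix of a candidate file and caches both known-good and known-missing directories so repeated lookups stay cheap. The same idea, reduced to plain sets with no dirstate or vfs (all names below are illustrative stand-ins), looks roughly like this:

# Sketch of the prefix-walking idea with memoisation. ``untracked_paths``
# stands in for "exists on disk as a file/link but is not tracked"; the real
# checker also caches missing directories and handles the reverse case of the
# candidate itself being a directory full of untracked files.
def prefixes(path):
    """Yield ancestor directories of path, shallowest first ('a/b/c' -> 'a', 'a/b')."""
    parts = path.split('/')[:-1]
    for i in range(1, len(parts) + 1):
        yield '/'.join(parts[:i])


class PrefixConflictChecker(object):
    def __init__(self, untracked_paths):
        self._untracked = set(untracked_paths)
        self._good = set()  # prefixes already checked and found safe

    def __call__(self, f):
        """Return the shortest conflicting prefix of f, or None."""
        for p in prefixes(f):
            if p in self._good:
                continue
            if p in self._untracked:
                return p
            self._good.add(p)
        return None


check = PrefixConflictChecker({'a/b'})   # 'a/b' exists on disk but is untracked
assert check('a/b/c.txt') == 'a/b'       # creating a/b/c.txt would conflict
assert check('a/d.txt') is None
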
130 130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
131 131 """
132 132 Considers any actions that care about the presence of conflicting unknown
133 133 files. For some actions, the result is to abort; for others, it is to
134 134 choose a different action.
135 135 """
136 136 fileconflicts = set()
137 137 pathconflicts = set()
138 138 warnconflicts = set()
139 139 abortconflicts = set()
140 140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
141 141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
142 142 pathconfig = repo.ui.configbool(
143 143 b'experimental', b'merge.checkpathconflicts'
144 144 )
145 145 if not force:
146 146
147 147 def collectconflicts(conflicts, config):
148 148 if config == b'abort':
149 149 abortconflicts.update(conflicts)
150 150 elif config == b'warn':
151 151 warnconflicts.update(conflicts)
152 152
153 153 checkunknowndirs = _unknowndirschecker()
154 154 for f in mresult.files(
155 155 (
156 156 mergestatemod.ACTION_CREATED,
157 157 mergestatemod.ACTION_DELETED_CHANGED,
158 158 )
159 159 ):
160 160 if _checkunknownfile(repo, wctx, mctx, f):
161 161 fileconflicts.add(f)
162 162 elif pathconfig and f not in wctx:
163 163 path = checkunknowndirs(repo, wctx, f)
164 164 if path is not None:
165 165 pathconflicts.add(path)
166 166 for f, args, msg in mresult.getactions(
167 167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
168 168 ):
169 169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
170 170 fileconflicts.add(f)
171 171
172 172 allconflicts = fileconflicts | pathconflicts
173 173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
174 174 unknownconflicts = allconflicts - ignoredconflicts
175 175 collectconflicts(ignoredconflicts, ignoredconfig)
176 176 collectconflicts(unknownconflicts, unknownconfig)
177 177 else:
178 178 for f, args, msg in list(
179 179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
180 180 ):
181 181 fl2, anc = args
182 182 different = _checkunknownfile(repo, wctx, mctx, f)
183 183 if repo.dirstate._ignore(f):
184 184 config = ignoredconfig
185 185 else:
186 186 config = unknownconfig
187 187
188 188 # The behavior when force is True is described by this table:
189 189 # config different mergeforce | action backup
190 190 # * n * | get n
191 191 # * y y | merge -
192 192 # abort y n | merge - (1)
193 193 # warn y n | warn + get y
194 194 # ignore y n | get y
195 195 #
196 196 # (1) this is probably the wrong behavior here -- we should
197 197 # probably abort, but some actions like rebases currently
198 198 # don't like an abort happening in the middle of
199 199 # merge.update.
200 200 if not different:
201 201 mresult.addfile(
202 202 f,
203 203 mergestatemod.ACTION_GET,
204 204 (fl2, False),
205 205 b'remote created',
206 206 )
207 207 elif mergeforce or config == b'abort':
208 208 mresult.addfile(
209 209 f,
210 210 mergestatemod.ACTION_MERGE,
211 211 (f, f, None, False, anc),
212 212 b'remote differs from untracked local',
213 213 )
214 214 elif config == b'abort':
215 215 abortconflicts.add(f)
216 216 else:
217 217 if config == b'warn':
218 218 warnconflicts.add(f)
219 219 mresult.addfile(
220 220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
221 221 )
222 222
223 223 for f in sorted(abortconflicts):
224 224 warn = repo.ui.warn
225 225 if f in pathconflicts:
226 226 if repo.wvfs.isfileorlink(f):
227 227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
228 228 else:
229 229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
230 230 else:
231 231 warn(_(b"%s: untracked file differs\n") % f)
232 232 if abortconflicts:
233 233 raise error.Abort(
234 234 _(
235 235 b"untracked files in working directory "
236 236 b"differ from files in requested revision"
237 237 )
238 238 )
239 239
240 240 for f in sorted(warnconflicts):
241 241 if repo.wvfs.isfileorlink(f):
242 242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
243 243 else:
244 244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
245 245
246 246 for f, args, msg in list(
247 247 mresult.getactions([mergestatemod.ACTION_CREATED])
248 248 ):
249 249 backup = (
250 250 f in fileconflicts
251 251 or f in pathconflicts
252 252 or any(p in pathconflicts for p in pathutil.finddirs(f))
253 253 )
254 254 (flags,) = args
255 255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256 256
257 257
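
The table in the ``force`` branch above compresses to a small decision function. The sketch below restates it in plain Python, returning a label instead of mutating a mergeresult; the function name and return values are illustrative only, but the precedence matches the table: identical content always gets, mergeforce or an 'abort' policy merges, and 'warn'/'ignore' get with a backup.

# Restatement of the force=True table as a pure function, returning
# (action, backup). Purely illustrative names.
def unknown_file_action(different, mergeforce, config):
    """config is the merge.checkunknown / merge.checkignored policy:
    'abort', 'warn' or 'ignore'."""
    if not different:
        return 'get', False
    if mergeforce or config == 'abort':
        # as the source notes, aborting mid-update is deliberately avoided here
        return 'merge', None
    # 'warn' and 'ignore' both keep the remote version but back up the
    # untracked local file; 'warn' additionally reports it.
    return 'get', True


assert unknown_file_action(False, False, 'warn') == ('get', False)
assert unknown_file_action(True, True, 'ignore') == ('merge', None)
assert unknown_file_action(True, False, 'warn') == ('get', True)
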
258 258 def _forgetremoved(wctx, mctx, branchmerge, mresult):
259 259 """
260 260 Forget removed files
261 261
262 262 If we're jumping between revisions (as opposed to merging), and if
263 263 neither the working directory nor the target rev has the file,
264 264 then we need to remove it from the dirstate, to prevent the
265 265 dirstate from listing the file when it is no longer in the
266 266 manifest.
267 267
268 268 If we're merging, and the other revision has removed a file
269 269 that is not present in the working directory, we need to mark it
270 270 as removed.
271 271 """
272 272
273 273 m = mergestatemod.ACTION_FORGET
274 274 if branchmerge:
275 275 m = mergestatemod.ACTION_REMOVE
276 276 for f in wctx.deleted():
277 277 if f not in mctx:
278 278 mresult.addfile(f, m, None, b"forget deleted")
279 279
280 280 if not branchmerge:
281 281 for f in wctx.removed():
282 282 if f not in mctx:
283 283 mresult.addfile(
284 284 f, mergestatemod.ACTION_FORGET, None, b"forget removed",
285 285 )
286 286
287 287
288 288 def _checkcollision(repo, wmf, mresult):
289 289 """
290 290 Check for case-folding collisions.
291 291 """
292 292 # If the repo is narrowed, filter out files outside the narrowspec.
293 293 narrowmatch = repo.narrowmatch()
294 294 if not narrowmatch.always():
295 295 pmmf = set(wmf.walk(narrowmatch))
296 296 if mresult:
297 297 for f in list(mresult.files()):
298 298 if not narrowmatch(f):
299 299 mresult.removefile(f)
300 300 else:
301 301 # build provisional merged manifest up
302 302 pmmf = set(wmf)
303 303
304 304 if mresult:
305 305 # KEEP and EXEC are no-op
306 306 for f in mresult.files(
307 307 (
308 308 mergestatemod.ACTION_ADD,
309 309 mergestatemod.ACTION_ADD_MODIFIED,
310 310 mergestatemod.ACTION_FORGET,
311 311 mergestatemod.ACTION_GET,
312 312 mergestatemod.ACTION_CHANGED_DELETED,
313 313 mergestatemod.ACTION_DELETED_CHANGED,
314 314 )
315 315 ):
316 316 pmmf.add(f)
317 317 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
318 318 pmmf.discard(f)
319 319 for f, args, msg in mresult.getactions(
320 320 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
321 321 ):
322 322 f2, flags = args
323 323 pmmf.discard(f2)
324 324 pmmf.add(f)
325 325 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
326 326 pmmf.add(f)
327 327 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
328 328 f1, f2, fa, move, anc = args
329 329 if move:
330 330 pmmf.discard(f1)
331 331 pmmf.add(f)
332 332
333 333 # check case-folding collision in provisional merged manifest
334 334 foldmap = {}
335 335 for f in pmmf:
336 336 fold = util.normcase(f)
337 337 if fold in foldmap:
338 338 raise error.Abort(
339 339 _(b"case-folding collision between %s and %s")
340 340 % (f, foldmap[fold])
341 341 )
342 342 foldmap[fold] = f
343 343
344 344 # check case-folding of directories
345 345 foldprefix = unfoldprefix = lastfull = b''
346 346 for fold, f in sorted(foldmap.items()):
347 347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
348 348 # the folded prefix matches but actual casing is different
349 349 raise error.Abort(
350 350 _(b"case-folding collision between %s and directory of %s")
351 351 % (lastfull, f)
352 352 )
353 353 foldprefix = fold + b'/'
354 354 unfoldprefix = f + b'/'
355 355 lastfull = f
356 356
357 357
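
The collision check above has two parts: two files whose names fold to the same string, and a file whose folded name is a directory prefix of another name with different actual casing. A compact, stand-alone version of the first, file-level part is sketched below, using a simple ``str.lower()`` fold in place of ``util.normcase()``; it is illustrative only.

# File-level case-folding collision check.
def find_case_collision(paths):
    """Return a colliding (a, b) pair if two paths differ only by case,
    else None."""
    foldmap = {}
    for f in paths:
        fold = f.lower()  # stand-in for util.normcase()
        if fold in foldmap and foldmap[fold] != f:
            return foldmap[fold], f
        foldmap[fold] = f
    return None


assert find_case_collision(['README', 'readme']) == ('README', 'readme')
assert find_case_collision(['a.txt', 'b.txt']) is None
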
358 358 def driverpreprocess(repo, ms, wctx, labels=None):
359 359 """run the preprocess step of the merge driver, if any
360 360
361 361 This is currently not implemented -- it's an extension point."""
362 362 return True
363 363
364 364
365 365 def driverconclude(repo, ms, wctx, labels=None):
366 366 """run the conclude step of the merge driver, if any
367 367
368 368 This is currently not implemented -- it's an extension point."""
369 369 return True
370 370
371 371
372 372 def _filesindirs(repo, manifest, dirs):
373 373 """
374 374 Generator that yields pairs of all the files in the manifest that are found
375 375 inside the directories listed in dirs, and which directory they are found
376 376 in.
377 377 """
378 378 for f in manifest:
379 379 for p in pathutil.finddirs(f):
380 380 if p in dirs:
381 381 yield f, p
382 382 break
383 383
384 384
385 385 def checkpathconflicts(repo, wctx, mctx, mresult):
386 386 """
387 387 Check if any actions introduce path conflicts in the repository, updating
388 388 actions to record or handle the path conflict accordingly.
389 389 """
390 390 mf = wctx.manifest()
391 391
392 392 # The set of local files that conflict with a remote directory.
393 393 localconflicts = set()
394 394
395 395 # The set of directories that conflict with a remote file, and so may cause
396 396 # conflicts if they still contain any files after the merge.
397 397 remoteconflicts = set()
398 398
399 399 # The set of directories that appear as both a file and a directory in the
400 400 # remote manifest. These indicate an invalid remote manifest, which
401 401 # can't be updated to cleanly.
402 402 invalidconflicts = set()
403 403
404 404 # The set of directories that contain files that are being created.
405 405 createdfiledirs = set()
406 406
407 407 # The set of files deleted by all the actions.
408 408 deletedfiles = set()
409 409
410 410 for f in mresult.files(
411 411 (
412 412 mergestatemod.ACTION_CREATED,
413 413 mergestatemod.ACTION_DELETED_CHANGED,
414 414 mergestatemod.ACTION_MERGE,
415 415 mergestatemod.ACTION_CREATED_MERGE,
416 416 )
417 417 ):
418 418 # This action may create a new local file.
419 419 createdfiledirs.update(pathutil.finddirs(f))
420 420 if mf.hasdir(f):
421 421 # The file aliases a local directory. This might be ok if all
422 422 # the files in the local directory are being deleted. This
423 423 # will be checked once we know what all the deleted files are.
424 424 remoteconflicts.add(f)
425 425 # Track the names of all deleted files.
426 426 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
427 427 deletedfiles.add(f)
428 428 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
429 429 f1, f2, fa, move, anc = args
430 430 if move:
431 431 deletedfiles.add(f1)
432 432 for (f, args, msg) in mresult.getactions(
433 433 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
434 434 ):
435 435 f2, flags = args
436 436 deletedfiles.add(f2)
437 437
438 438 # Check all directories that contain created files for path conflicts.
439 439 for p in createdfiledirs:
440 440 if p in mf:
441 441 if p in mctx:
442 442 # A file is in a directory which aliases both a local
443 443 # and a remote file. This is an internal inconsistency
444 444 # within the remote manifest.
445 445 invalidconflicts.add(p)
446 446 else:
447 447 # A file is in a directory which aliases a local file.
448 448 # We will need to rename the local file.
449 449 localconflicts.add(p)
450 450 pd = mresult.getfile(p)
451 451 if pd and pd[0] in (
452 452 mergestatemod.ACTION_CREATED,
453 453 mergestatemod.ACTION_DELETED_CHANGED,
454 454 mergestatemod.ACTION_MERGE,
455 455 mergestatemod.ACTION_CREATED_MERGE,
456 456 ):
457 457 # The file is in a directory which aliases a remote file.
458 458 # This is an internal inconsistency within the remote
459 459 # manifest.
460 460 invalidconflicts.add(p)
461 461
462 462 # Rename all local conflicting files that have not been deleted.
463 463 for p in localconflicts:
464 464 if p not in deletedfiles:
465 465 ctxname = bytes(wctx).rstrip(b'+')
466 466 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
467 467 porig = wctx[p].copysource() or p
468 468 mresult.addfile(
469 469 pnew,
470 470 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
471 471 (p, porig),
472 472 b'local path conflict',
473 473 )
474 474 mresult.addfile(
475 475 p,
476 476 mergestatemod.ACTION_PATH_CONFLICT,
477 477 (pnew, b'l'),
478 478 b'path conflict',
479 479 )
480 480
481 481 if remoteconflicts:
482 482 # Check if all files in the conflicting directories have been removed.
483 483 ctxname = bytes(mctx).rstrip(b'+')
484 484 for f, p in _filesindirs(repo, mf, remoteconflicts):
485 485 if f not in deletedfiles:
486 486 m, args, msg = mresult.getfile(p)
487 487 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
488 488 if m in (
489 489 mergestatemod.ACTION_DELETED_CHANGED,
490 490 mergestatemod.ACTION_MERGE,
491 491 ):
492 492 # Action was merge, just update target.
493 493 mresult.addfile(pnew, m, args, msg)
494 494 else:
495 495 # Action was create, change to renamed get action.
496 496 fl = args[0]
497 497 mresult.addfile(
498 498 pnew,
499 499 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
500 500 (p, fl),
501 501 b'remote path conflict',
502 502 )
503 503 mresult.addfile(
504 504 p,
505 505 mergestatemod.ACTION_PATH_CONFLICT,
506 506 (pnew, mergestatemod.ACTION_REMOVE),
507 507 b'path conflict',
508 508 )
509 509 remoteconflicts.remove(p)
510 510 break
511 511
512 512 if invalidconflicts:
513 513 for p in invalidconflicts:
514 514 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
515 515 raise error.Abort(_(b"destination manifest contains path conflicts"))
516 516
517 517
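
The function above distinguishes two basic shapes of path conflict. As a rough illustration over plain sets of path strings (no manifests, hypothetical names), the classification boils down to the sketch below: a created file under a path that is currently a tracked file is a local conflict, and a created file whose name is a directory that still contains tracked files is a remote conflict.

# Sketch of the two basic path-conflict shapes, purely illustrative.
def classify_path_conflicts(tracked, created):
    """Return (localconflicts, remoteconflicts) for the created paths."""
    tracked = set(tracked)
    trackeddirs = set()
    for p in tracked:
        parts = p.split('/')
        for i in range(1, len(parts)):
            trackeddirs.add('/'.join(parts[:i]))
    local, remote = set(), set()
    for f in created:
        parts = f.split('/')
        for i in range(1, len(parts)):
            prefix = '/'.join(parts[:i])
            if prefix in tracked:
                local.add(prefix)      # e.g. creating 'a/b' while 'a' is a file
        if f in trackeddirs:
            remote.add(f)              # e.g. creating 'a' while 'a/b' is tracked
    return local, remote


local, remote = classify_path_conflicts(tracked={'a', 'x/y'}, created={'a/b', 'x'})
assert local == {'a'} and remote == {'x'}
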
518 518 def _filternarrowactions(narrowmatch, branchmerge, mresult):
519 519 """
520 520 Filters out actions that can be ignored because the repo is narrowed.
521 521
522 522 Raise an exception if the merge cannot be completed because the repo is
523 523 narrowed.
524 524 """
525 525 # TODO: handle with nonconflicttypes
526 526 nooptypes = {mergestatemod.ACTION_KEEP}
527 527 nonconflicttypes = {
528 528 mergestatemod.ACTION_ADD,
529 529 mergestatemod.ACTION_ADD_MODIFIED,
530 530 mergestatemod.ACTION_CREATED,
531 531 mergestatemod.ACTION_CREATED_MERGE,
532 532 mergestatemod.ACTION_FORGET,
533 533 mergestatemod.ACTION_GET,
534 534 mergestatemod.ACTION_REMOVE,
535 535 mergestatemod.ACTION_EXEC,
536 536 }
537 537 # We mutate the items in the dict during iteration, so iterate
538 538 # over a copy.
539 539 for f, action in mresult.filemap():
540 540 if narrowmatch(f):
541 541 pass
542 542 elif not branchmerge:
543 543 mresult.removefile(f) # just updating, ignore changes outside clone
544 544 elif action[0] in nooptypes:
545 545 mresult.removefile(f) # merge does not affect file
546 546 elif action[0] in nonconflicttypes:
547 547 raise error.Abort(
548 548 _(
549 549 b'merge affects file \'%s\' outside narrow, '
550 550 b'which is not yet supported'
551 551 )
552 552 % f,
553 553 hint=_(b'merging in the other direction may work'),
554 554 )
555 555 else:
556 556 raise error.Abort(
557 557 _(b'conflict in file \'%s\' is outside narrow clone') % f
558 558 )
559 559
560 560
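
The filter above boils down to a three-way decision for any file outside the narrowspec: no-ops and plain (non-merge) updates are dropped, non-conflicting merge actions abort with a hint, and anything else aborts as a conflict. A compact restatement with illustrative labels instead of real action constants:

# Restatement of _filternarrowactions' decision for a single file; the
# action names and return labels are illustrative only.
NOOP_ACTIONS = {'keep'}
NONCONFLICT_ACTIONS = {'add', 'add-modified', 'created', 'created-merge',
                       'forget', 'get', 'remove', 'exec'}


def narrow_decision(in_narrow, branchmerge, action):
    if in_narrow:
        return 'keep'                      # file is inside the narrowspec
    if not branchmerge or action in NOOP_ACTIONS:
        return 'drop'                      # just updating, or merge is a no-op
    if action in NONCONFLICT_ACTIONS:
        return 'abort: not yet supported'  # merge touches file outside narrow
    return 'abort: conflict outside narrow clone'


assert narrow_decision(True, True, 'merge') == 'keep'
assert narrow_decision(False, False, 'get') == 'drop'
assert narrow_decision(False, True, 'get') == 'abort: not yet supported'
assert narrow_decision(False, True, 'merge') == 'abort: conflict outside narrow clone'
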
561 561 class mergeresult(object):
562 562 '''An object representing the result of merging manifests.
563 563
564 564 It has information about what actions need to be performed on the dirstate,
565 565 the mapping of divergent renames, and other such cases. '''
566 566
567 567 def __init__(self):
568 568 """
569 569 filemapping: dict of filename as keys and action related info as values
570 570 diverge: mapping of source name -> list of dest name for
571 571 divergent renames
572 572 renamedelete: mapping of source name -> list of destinations for files
573 573 deleted on one side and renamed on other.
574 574 commitinfo: dict containing data which should be used on commit
575 575 contains a filename -> info mapping
576 576 actionmapping: dict of action names as keys and values are dict of
577 577 filename as key and related data as values
578 578 """
579 579 self._filemapping = {}
580 580 self._diverge = {}
581 581 self._renamedelete = {}
582 582 self._commitinfo = collections.defaultdict(dict)
583 583 self._actionmapping = collections.defaultdict(dict)
584 584
585 585 def updatevalues(self, diverge, renamedelete):
586 586 self._diverge = diverge
587 587 self._renamedelete = renamedelete
588 588
589 589 def addfile(self, filename, action, data, message):
590 590 """ adds a new file to the mergeresult object
591 591
592 592 filename: file which we are adding
593 593 action: one of mergestatemod.ACTION_*
594 594 data: a tuple of information like fctx and ctx related to this merge
595 595 message: a message about the merge
596 596 """
597 597 # if the file already existed, we need to delete its old
598 598 # entry from _actionmapping too
599 599 if filename in self._filemapping:
600 600 a, d, m = self._filemapping[filename]
601 601 del self._actionmapping[a][filename]
602 602
603 603 self._filemapping[filename] = (action, data, message)
604 604 self._actionmapping[action][filename] = (data, message)
605 605
606 606 def getfile(self, filename, default_return=None):
607 607 """ returns (action, args, msg) about this file
608 608
609 609 returns default_return if the file is not present """
610 610 if filename in self._filemapping:
611 611 return self._filemapping[filename]
612 612 return default_return
613 613
614 614 def files(self, actions=None):
615 615 """ returns files on which provided action needs to perfromed
616 616
617 617 If actions is None, all files are returned
618 618 """
619 619 # TODO: think whether we should return renamedelete and
620 620 # diverge filenames also
621 621 if actions is None:
622 622 for f in self._filemapping:
623 623 yield f
624 624
625 625 else:
626 626 for a in actions:
627 627 for f in self._actionmapping[a]:
628 628 yield f
629 629
630 630 def removefile(self, filename):
631 631 """ removes a file from the mergeresult object as the file might
632 632 not merging anymore """
633 633 action, data, message = self._filemapping[filename]
634 634 del self._filemapping[filename]
635 635 del self._actionmapping[action][filename]
636 636
637 637 def getactions(self, actions, sort=False):
638 638 """ get list of files which are marked with these actions
639 639 if sort is true, files for each action are sorted and then yielded
640 640 
641 641 Returns a list of tuples of the form (filename, data, message)
642 642 """
643 643 for a in actions:
644 644 if sort:
645 645 for f in sorted(self._actionmapping[a]):
646 646 args, msg = self._actionmapping[a][f]
647 647 yield f, args, msg
648 648 else:
649 649 for f, (args, msg) in pycompat.iteritems(
650 650 self._actionmapping[a]
651 651 ):
652 652 yield f, args, msg
653 653
654 654 def len(self, actions=None):
655 655 """ returns number of files which needs actions
656 656
657 657 if actions is passed, total of number of files in that action
658 658 only is returned """
659 659
660 660 if actions is None:
661 661 return len(self._filemapping)
662 662
663 663 return sum(len(self._actionmapping[a]) for a in actions)
664 664
665 665 def filemap(self, sort=False):
666 666 if sort:
667 667 for key, val in sorted(pycompat.iteritems(self._filemapping)):
668 668 yield key, val
669 669 else:
670 670 for key, val in pycompat.iteritems(self._filemapping):
671 671 yield key, val
672 672
673 673 def addcommitinfo(self, filename, key, value):
674 674 """ adds key-value information about filename which will be required
675 675 while committing this merge """
676 676 self._commitinfo[filename][key] = value
677 677
678 678 @property
679 679 def diverge(self):
680 680 return self._diverge
681 681
682 682 @property
683 683 def renamedelete(self):
684 684 return self._renamedelete
685 685
686 686 @property
687 687 def commitinfo(self):
688 688 return self._commitinfo
689 689
690 690 @property
691 691 def actionsdict(self):
692 692 """ returns a dictionary of actions to be perfomed with action as key
693 693 and a list of files and related arguments as values """
694 694 res = collections.defaultdict(list)
695 695 for a, d in pycompat.iteritems(self._actionmapping):
696 696 for f, (args, msg) in pycompat.iteritems(d):
697 697 res[a].append((f, args, msg))
698 698 return res
699 699
700 700 def setactions(self, actions):
701 701 self._filemapping = actions
702 702 self._actionmapping = collections.defaultdict(dict)
703 703 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
704 704 self._actionmapping[act][f] = data, msg
705 705
706 706 def hasconflicts(self):
707 707 """ tells whether this merge resulted in some actions which can
708 708 result in conflicts or not """
709 709 for a in self._actionmapping.keys():
710 710 if (
711 711 a
712 712 not in (
713 713 mergestatemod.ACTION_GET,
714 714 mergestatemod.ACTION_KEEP,
715 715 mergestatemod.ACTION_EXEC,
716 716 mergestatemod.ACTION_REMOVE,
717 717 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
718 718 )
719 719 and self._actionmapping[a]
720 720 ):
721 721 return True
722 722
723 723 return False
724 724
725 725
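
The class above keeps the same information twice, once keyed by filename and once keyed by action, so both "what happens to file X" and "which files get action Y" are constant-time lookups. A toy version of that dual index, reduced to the add/remove/query core with hypothetical names:

# Toy dual-index version of the mergeresult bookkeeping: the same
# (action, data, message) triple is reachable by file and by action.
import collections


class MiniMergeResult(object):
    def __init__(self):
        self._byfile = {}                               # file -> (action, data, msg)
        self._byaction = collections.defaultdict(dict)  # action -> {file: (data, msg)}

    def addfile(self, f, action, data, msg):
        if f in self._byfile:                           # keep both indexes in sync
            old_action, _, _ = self._byfile[f]
            del self._byaction[old_action][f]
        self._byfile[f] = (action, data, msg)
        self._byaction[action][f] = (data, msg)

    def removefile(self, f):
        action, _, _ = self._byfile.pop(f)
        del self._byaction[action][f]

    def files(self, actions=None):
        if actions is None:
            return list(self._byfile)
        return [f for a in actions for f in self._byaction[a]]


m = MiniMergeResult()
m.addfile('a.txt', 'get', ('flags',), 'remote is newer')
m.addfile('b.txt', 'merge', None, 'versions differ')
m.addfile('a.txt', 'keep', (), 'remote unchanged')  # re-adding re-indexes the file
assert m.files(['get']) == [] and m.files(['keep']) == ['a.txt']
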
726 726 def manifestmerge(
727 727 repo,
728 728 wctx,
729 729 p2,
730 730 pa,
731 731 branchmerge,
732 732 force,
733 733 matcher,
734 734 acceptremote,
735 735 followcopies,
736 736 forcefulldiff=False,
737 737 ):
738 738 """
739 739 Merge wctx and p2 with ancestor pa and generate merge action list
740 740
741 741 branchmerge and force are as passed in to update
742 742 matcher = matcher to filter file lists
743 743 acceptremote = accept the incoming changes without prompting
744 744
745 745 Returns an object of mergeresult class
746 746 """
747 747 mresult = mergeresult()
748 748 if matcher is not None and matcher.always():
749 749 matcher = None
750 750
751 751 # manifests fetched in order are going to be faster, so prime the caches
752 752 [
753 753 x.manifest()
754 754 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
755 755 ]
756 756
757 757 branch_copies1 = copies.branch_copies()
758 758 branch_copies2 = copies.branch_copies()
759 759 diverge = {}
760 760 # information from merge which is needed at commit time
761 761 # for example choosing filelog of which parent to commit
762 762 # TODO: use specific constants in future for this mapping
763 763 if followcopies:
764 764 branch_copies1, branch_copies2, diverge = copies.mergecopies(
765 765 repo, wctx, p2, pa
766 766 )
767 767
768 768 boolbm = pycompat.bytestr(bool(branchmerge))
769 769 boolf = pycompat.bytestr(bool(force))
770 770 boolm = pycompat.bytestr(bool(matcher))
771 771 repo.ui.note(_(b"resolving manifests\n"))
772 772 repo.ui.debug(
773 773 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
774 774 )
775 775 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
776 776
777 777 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
778 778 copied1 = set(branch_copies1.copy.values())
779 779 copied1.update(branch_copies1.movewithdir.values())
780 780 copied2 = set(branch_copies2.copy.values())
781 781 copied2.update(branch_copies2.movewithdir.values())
782 782
783 783 if b'.hgsubstate' in m1 and wctx.rev() is None:
784 784 # Check whether sub state is modified, and overwrite the manifest
785 785 # to flag the change. If wctx is a committed revision, we shouldn't
786 786 # care for the dirty state of the working directory.
787 787 if any(wctx.sub(s).dirty() for s in wctx.substate):
788 788 m1[b'.hgsubstate'] = modifiednodeid
789 789
790 790 # Don't use m2-vs-ma optimization if:
791 791 # - ma is the same as m1 or m2, which we're just going to diff again later
792 792 # - The caller specifically asks for a full diff, which is useful during bid
793 793 # merge.
794 794 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
795 795 # Identify which files are relevant to the merge, so we can limit the
796 796 # total m1-vs-m2 diff to just those files. This has significant
797 797 # performance benefits in large repositories.
798 798 relevantfiles = set(ma.diff(m2).keys())
799 799
800 800 # For copied and moved files, we need to add the source file too.
801 801 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
802 802 if copyvalue in relevantfiles:
803 803 relevantfiles.add(copykey)
804 804 for movedirkey in branch_copies1.movewithdir:
805 805 relevantfiles.add(movedirkey)
806 806 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
807 807 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
808 808
809 809 diff = m1.diff(m2, match=matcher)
810 810
811 811 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
812 812 if n1 and n2: # file exists on both local and remote side
813 813 if f not in ma:
814 814 # TODO: what if they're renamed from different sources?
815 815 fa = branch_copies1.copy.get(
816 816 f, None
817 817 ) or branch_copies2.copy.get(f, None)
818 818 args, msg = None, None
819 819 if fa is not None:
820 820 args = (f, f, fa, False, pa.node())
821 821 msg = b'both renamed from %s' % fa
822 822 else:
823 823 args = (f, f, None, False, pa.node())
824 824 msg = b'both created'
825 825 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
826 826 else:
827 827 a = ma[f]
828 828 fla = ma.flags(f)
829 829 nol = b'l' not in fl1 + fl2 + fla
830 830 if n2 == a and fl2 == fla:
831 831 mresult.addfile(
832 832 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
833 833 )
834 834 elif n1 == a and fl1 == fla: # local unchanged - use remote
835 835 if n1 == n2: # optimization: keep local content
836 836 mresult.addfile(
837 837 f,
838 838 mergestatemod.ACTION_EXEC,
839 839 (fl2,),
840 840 b'update permissions',
841 841 )
842 842 else:
843 843 mresult.addfile(
844 844 f,
845 845 mergestatemod.ACTION_GET,
846 846 (fl2, False),
847 847 b'remote is newer',
848 848 )
849 849 if branchmerge:
850 850 mresult.addcommitinfo(
851 851 f, b'filenode-source', b'other'
852 852 )
853 853 elif nol and n2 == a: # remote only changed 'x'
854 854 mresult.addfile(
855 855 f,
856 856 mergestatemod.ACTION_EXEC,
857 857 (fl2,),
858 858 b'update permissions',
859 859 )
860 860 elif nol and n1 == a: # local only changed 'x'
861 861 mresult.addfile(
862 862 f,
863 863 mergestatemod.ACTION_GET,
864 864 (fl1, False),
865 865 b'remote is newer',
866 866 )
867 867 if branchmerge:
868 868 mresult.addcommitinfo(f, b'filenode-source', b'other')
869 869 else: # both changed something
870 870 mresult.addfile(
871 871 f,
872 872 mergestatemod.ACTION_MERGE,
873 873 (f, f, f, False, pa.node()),
874 874 b'versions differ',
875 875 )
876 876 elif n1: # file exists only on local side
877 877 if f in copied2:
878 878 pass # we'll deal with it on m2 side
879 879 elif (
880 880 f in branch_copies1.movewithdir
881 881 ): # directory rename, move local
882 882 f2 = branch_copies1.movewithdir[f]
883 883 if f2 in m2:
884 884 mresult.addfile(
885 885 f2,
886 886 mergestatemod.ACTION_MERGE,
887 887 (f, f2, None, True, pa.node()),
888 888 b'remote directory rename, both created',
889 889 )
890 890 else:
891 891 mresult.addfile(
892 892 f2,
893 893 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
894 894 (f, fl1),
895 895 b'remote directory rename - move from %s' % f,
896 896 )
897 897 elif f in branch_copies1.copy:
898 898 f2 = branch_copies1.copy[f]
899 899 mresult.addfile(
900 900 f,
901 901 mergestatemod.ACTION_MERGE,
902 902 (f, f2, f2, False, pa.node()),
903 903 b'local copied/moved from %s' % f2,
904 904 )
905 905 elif f in ma: # clean, a different, no remote
906 906 if n1 != ma[f]:
907 907 if acceptremote:
908 908 mresult.addfile(
909 909 f,
910 910 mergestatemod.ACTION_REMOVE,
911 911 None,
912 912 b'remote delete',
913 913 )
914 914 else:
915 915 mresult.addfile(
916 916 f,
917 917 mergestatemod.ACTION_CHANGED_DELETED,
918 918 (f, None, f, False, pa.node()),
919 919 b'prompt changed/deleted',
920 920 )
921 921 elif n1 == addednodeid:
922 922 # This file was locally added. We should forget it instead of
923 923 # deleting it.
924 924 mresult.addfile(
925 925 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
926 926 )
927 927 else:
928 928 mresult.addfile(
929 929 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
930 930 )
931 931 elif n2: # file exists only on remote side
932 932 if f in copied1:
933 933 pass # we'll deal with it on m1 side
934 934 elif f in branch_copies2.movewithdir:
935 935 f2 = branch_copies2.movewithdir[f]
936 936 if f2 in m1:
937 937 mresult.addfile(
938 938 f2,
939 939 mergestatemod.ACTION_MERGE,
940 940 (f2, f, None, False, pa.node()),
941 941 b'local directory rename, both created',
942 942 )
943 943 else:
944 944 mresult.addfile(
945 945 f2,
946 946 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
947 947 (f, fl2),
948 948 b'local directory rename - get from %s' % f,
949 949 )
950 950 elif f in branch_copies2.copy:
951 951 f2 = branch_copies2.copy[f]
952 952 msg, args = None, None
953 953 if f2 in m2:
954 954 args = (f2, f, f2, False, pa.node())
955 955 msg = b'remote copied from %s' % f2
956 956 else:
957 957 args = (f2, f, f2, True, pa.node())
958 958 msg = b'remote moved from %s' % f2
959 959 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
960 960 elif f not in ma:
961 961 # local unknown, remote created: the logic is described by the
962 962 # following table:
963 963 #
964 964 # force branchmerge different | action
965 965 # n * * | create
966 966 # y n * | create
967 967 # y y n | create
968 968 # y y y | merge
969 969 #
970 970 # Checking whether the files are different is expensive, so we
971 971 # don't do that when we can avoid it.
972 972 if not force:
973 973 mresult.addfile(
974 974 f,
975 975 mergestatemod.ACTION_CREATED,
976 976 (fl2,),
977 977 b'remote created',
978 978 )
979 979 elif not branchmerge:
980 980 mresult.addfile(
981 981 f,
982 982 mergestatemod.ACTION_CREATED,
983 983 (fl2,),
984 984 b'remote created',
985 985 )
986 986 else:
987 987 mresult.addfile(
988 988 f,
989 989 mergestatemod.ACTION_CREATED_MERGE,
990 990 (fl2, pa.node()),
991 991 b'remote created, get or merge',
992 992 )
993 993 elif n2 != ma[f]:
994 994 df = None
995 995 for d in branch_copies1.dirmove:
996 996 if f.startswith(d):
997 997 # new file added in a directory that was moved
998 998 df = branch_copies1.dirmove[d] + f[len(d) :]
999 999 break
1000 1000 if df is not None and df in m1:
1001 1001 mresult.addfile(
1002 1002 df,
1003 1003 mergestatemod.ACTION_MERGE,
1004 1004 (df, f, f, False, pa.node()),
1005 1005 b'local directory rename - respect move '
1006 1006 b'from %s' % f,
1007 1007 )
1008 1008 elif acceptremote:
1009 1009 mresult.addfile(
1010 1010 f,
1011 1011 mergestatemod.ACTION_CREATED,
1012 1012 (fl2,),
1013 1013 b'remote recreating',
1014 1014 )
1015 1015 else:
1016 1016 mresult.addfile(
1017 1017 f,
1018 1018 mergestatemod.ACTION_DELETED_CHANGED,
1019 1019 (None, f, f, False, pa.node()),
1020 1020 b'prompt deleted/changed',
1021 1021 )
1022 1022
1023 1023 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1024 1024 # If we are merging, look for path conflicts.
1025 1025 checkpathconflicts(repo, wctx, p2, mresult)
1026 1026
1027 1027 narrowmatch = repo.narrowmatch()
1028 1028 if not narrowmatch.always():
1029 1029 # Updates "mresult" in place
1030 1030 _filternarrowactions(narrowmatch, branchmerge, mresult)
1031 1031
1032 1032 renamedelete = branch_copies1.renamedelete
1033 1033 renamedelete.update(branch_copies2.renamedelete)
1034 1034
1035 1035 mresult.updatevalues(diverge, renamedelete)
1036 1036 return mresult
1037 1037
1038 1038
1039 1039 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1040 1040 """Resolves false conflicts where the nodeid changed but the content
1041 1041 remained the same."""
1042 1042 # We force a copy of the file list because we're going to mutate
1043 1043 # mresult as we resolve trivial conflicts.
1044 1044 for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
1045 1045 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1046 1046 # local did change but ended up with same content
1047 1047 mresult.addfile(
1048 1048 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1049 1049 )
1050 1050
1051 1051 for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
1052 1052 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1053 1053 # remote did change but ended up with same content
1054 1054 mresult.removefile(f) # don't get = keep local deleted
1055 1055
1056 1056
1057 1057 def calculateupdates(
1058 1058 repo,
1059 1059 wctx,
1060 1060 mctx,
1061 1061 ancestors,
1062 1062 branchmerge,
1063 1063 force,
1064 1064 acceptremote,
1065 1065 followcopies,
1066 1066 matcher=None,
1067 1067 mergeforce=False,
1068 1068 ):
1069 1069 """
1070 1070 Calculate the actions needed to merge mctx into wctx using ancestors
1071 1071
1072 1072 Uses manifestmerge() to compare the two manifests and compute the list of
1073 1073 actions required to merge them. If there are multiple ancestors, uses bid
1074 1074 merge if enabled.
1075 1075
1076 1076 Also filters out actions which are not required if the repository is sparse.
1077 1077
1078 1078 Returns a mergeresult object, same as manifestmerge().
1079 1079 """
1080 1080 # Avoid cycle.
1081 1081 from . import sparse
1082 1082
1083 1083 mresult = None
1084 1084 if len(ancestors) == 1: # default
1085 1085 mresult = manifestmerge(
1086 1086 repo,
1087 1087 wctx,
1088 1088 mctx,
1089 1089 ancestors[0],
1090 1090 branchmerge,
1091 1091 force,
1092 1092 matcher,
1093 1093 acceptremote,
1094 1094 followcopies,
1095 1095 )
1096 1096 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1097 1097
1098 1098 else: # only when merge.preferancestor=* - the default
1099 1099 repo.ui.note(
1100 1100 _(b"note: merging %s and %s using bids from ancestors %s\n")
1101 1101 % (
1102 1102 wctx,
1103 1103 mctx,
1104 1104 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1105 1105 )
1106 1106 )
1107 1107
1108 1108 # mapping filename to bids (action method to list of actions)
1109 1109 # {FILENAME1 : BID1, FILENAME2 : BID2}
1110 1110 # BID is another dictionary which contains
1111 1111 # a mapping of the following form:
1112 1112 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
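# For illustration only (the file name and args below are hypothetical),
# fbids might end up looking like:
#
#   {b'foo': {mergestatemod.ACTION_GET:
#                 [(mergestatemod.ACTION_GET, (b'', False), b'remote is newer'),
#                  (mergestatemod.ACTION_GET, (b'', False), b'remote is newer')]}}
#
# i.e. each list entry is the full (action, args, msg) triple produced by
# manifestmerge() for one ancestor.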
1113 1113 fbids = {}
1114 1114 diverge, renamedelete = None, None
1115 1115 for ancestor in ancestors:
1116 1116 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1117 1117 mresult1 = manifestmerge(
1118 1118 repo,
1119 1119 wctx,
1120 1120 mctx,
1121 1121 ancestor,
1122 1122 branchmerge,
1123 1123 force,
1124 1124 matcher,
1125 1125 acceptremote,
1126 1126 followcopies,
1127 1127 forcefulldiff=True,
1128 1128 )
1129 1129 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1130 1130
1131 1131 # Track the shortest set of warnings on the theory that bid
1132 1132 # merge will correctly incorporate more information
1133 1133 if diverge is None or len(mresult1.diverge) < len(diverge):
1134 1134 diverge = mresult1.diverge
1135 1135 if renamedelete is None or len(renamedelete) < len(
1136 1136 mresult1.renamedelete
1137 1137 ):
1138 1138 renamedelete = mresult1.renamedelete
1139 1139
1140 1140 for f, a in mresult1.filemap(sort=True):
1141 1141 m, args, msg = a
1142 1142 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1143 1143 if f in fbids:
1144 1144 d = fbids[f]
1145 1145 if m in d:
1146 1146 d[m].append(a)
1147 1147 else:
1148 1148 d[m] = [a]
1149 1149 else:
1150 1150 fbids[f] = {m: [a]}
1151 1151
1152 1152 # Call for bids
1153 1153 # Pick the best bid for each file
1154 1154 repo.ui.note(
1155 1155 _(b'\nauction for merging merge bids (%d ancestors)\n')
1156 1156 % len(ancestors)
1157 1157 )
1158 1158 mresult = mergeresult()
1159 1159 for f, bids in sorted(fbids.items()):
1160 1160 if repo.ui.debugflag:
1161 1161 repo.ui.debug(b" list of bids for %s:\n" % f)
1162 1162 for m, l in sorted(bids.items()):
1163 1163 for _f, args, msg in l:
1164 1164 repo.ui.debug(b' %s -> %s\n' % (msg, m))
1165 1165 # bids is a mapping from action method to list of actions
1166 1166 # Consensus?
1167 1167 if len(bids) == 1: # all bids are the same kind of method
1168 1168 m, l = list(bids.items())[0]
1169 1169 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1170 1170 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1171 1171 mresult.addfile(f, *l[0])
1172 1172 continue
1173 1173 # If keep is an option, just do it.
1174 1174 if mergestatemod.ACTION_KEEP in bids:
1175 1175 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1176 1176 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1177 1177 continue
1178 1178 # If there are gets and they all agree [how could they not?], do it.
1179 1179 if mergestatemod.ACTION_GET in bids:
1180 1180 ga0 = bids[mergestatemod.ACTION_GET][0]
1181 1181 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1182 1182 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1183 1183 mresult.addfile(f, *ga0)
1184 1184 continue
1185 1185 # TODO: Consider other simple actions such as mode changes
1186 1186 # Handle inefficient democrazy.
1187 1187 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1188 1188 for m, l in sorted(bids.items()):
1189 1189 for _f, args, msg in l:
1190 1190 repo.ui.note(b' %s -> %s\n' % (msg, m))
1191 1191 # Pick random action. TODO: Instead, prompt user when resolving
1192 1192 m, l = list(bids.items())[0]
1193 1193 repo.ui.warn(
1194 1194 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1195 1195 )
1196 1196 mresult.addfile(f, *l[0])
1197 1197 continue
1198 1198 repo.ui.note(_(b'end of auction\n\n'))
1199 1199 mresult.updatevalues(diverge, renamedelete)
1200 1200
1201 1201 if wctx.rev() is None:
1202 1202 _forgetremoved(wctx, mctx, branchmerge, mresult)
1203 1203
1204 1204 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1205 1205 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1206 1206
1207 1207 return mresult
1208 1208
1209 1209
1210 1210 def _getcwd():
1211 1211 try:
1212 1212 return encoding.getcwd()
1213 1213 except OSError as err:
1214 1214 if err.errno == errno.ENOENT:
1215 1215 return None
1216 1216 raise
1217 1217
1218 1218
1219 1219 def batchremove(repo, wctx, actions):
1220 1220 """apply removes to the working directory
1221 1221
1222 1222 yields tuples for progress updates
1223 1223 """
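# Each yielded tuple is (count, last_filename) for a batch of at most 100
# removals; a sketch of the consuming side (mirroring applyupdates() below):
#
#   for count, lastfile in batchremove(repo, wctx, actions):
#       progress.increment(step=count, item=lastfile)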
1224 1224 verbose = repo.ui.verbose
1225 1225 cwd = _getcwd()
1226 1226 i = 0
1227 1227 for f, args, msg in actions:
1228 1228 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1229 1229 if verbose:
1230 1230 repo.ui.note(_(b"removing %s\n") % f)
1231 1231 wctx[f].audit()
1232 1232 try:
1233 1233 wctx[f].remove(ignoremissing=True)
1234 1234 except OSError as inst:
1235 1235 repo.ui.warn(
1236 1236 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1237 1237 )
1238 1238 if i == 100:
1239 1239 yield i, f
1240 1240 i = 0
1241 1241 i += 1
1242 1242 if i > 0:
1243 1243 yield i, f
1244 1244
1245 1245 if cwd and not _getcwd():
1246 1246 # cwd was removed in the course of removing files; print a helpful
1247 1247 # warning.
1248 1248 repo.ui.warn(
1249 1249 _(
1250 1250 b"current directory was removed\n"
1251 1251 b"(consider changing to repo root: %s)\n"
1252 1252 )
1253 1253 % repo.root
1254 1254 )
1255 1255
1256 1256
1257 1257 def batchget(repo, mctx, wctx, wantfiledata, actions):
1258 1258 """apply gets to the working directory
1259 1259
1260 1260 mctx is the context to get from
1261 1261
1262 1262 Yields arbitrarily many (False, tuple) for progress updates, followed by
1263 1263 exactly one (True, filedata). When wantfiledata is false, filedata is an
1264 1264 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1265 1265 mtime) of the file f written for each action.
1266 1266 """
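# A consumer is expected to drain the generator and keep the final payload,
# e.g. (sketch of a worker-less call):
#
#   filedata = {}
#   for final, res in batchget(repo, mctx, wctx, wantfiledata, actions):
#       if final:
#           filedata = res          # the single (True, filedata) pair
#       else:
#           count, lastfile = res   # progress batch of at most 100 gets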
1267 1267 filedata = {}
1268 1268 verbose = repo.ui.verbose
1269 1269 fctx = mctx.filectx
1270 1270 ui = repo.ui
1271 1271 i = 0
1272 1272 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1273 1273 for f, (flags, backup), msg in actions:
1274 1274 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1275 1275 if verbose:
1276 1276 repo.ui.note(_(b"getting %s\n") % f)
1277 1277
1278 1278 if backup:
1279 1279 # If a file or directory exists with the same name, back that
1280 1280 # up. Otherwise, look to see if there is a file that conflicts
1281 1281 # with a directory this file is in, and if so, back that up.
1282 1282 conflicting = f
1283 1283 if not repo.wvfs.lexists(f):
1284 1284 for p in pathutil.finddirs(f):
1285 1285 if repo.wvfs.isfileorlink(p):
1286 1286 conflicting = p
1287 1287 break
1288 1288 if repo.wvfs.lexists(conflicting):
1289 1289 orig = scmutil.backuppath(ui, repo, conflicting)
1290 1290 util.rename(repo.wjoin(conflicting), orig)
1291 1291 wfctx = wctx[f]
1292 1292 wfctx.clearunknown()
1293 1293 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1294 1294 size = wfctx.write(
1295 1295 fctx(f).data(),
1296 1296 flags,
1297 1297 backgroundclose=True,
1298 1298 atomictemp=atomictemp,
1299 1299 )
1300 1300 if wantfiledata:
1301 1301 s = wfctx.lstat()
1302 1302 mode = s.st_mode
1303 1303 mtime = s[stat.ST_MTIME]
1304 1304 filedata[f] = (mode, size, mtime) # for dirstate.normal
1305 1305 if i == 100:
1306 1306 yield False, (i, f)
1307 1307 i = 0
1308 1308 i += 1
1309 1309 if i > 0:
1310 1310 yield False, (i, f)
1311 1311 yield True, filedata
1312 1312
1313 1313
1314 1314 def _prefetchfiles(repo, ctx, mresult):
1315 1315 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1316 1316 of merge actions. ``ctx`` is the context being merged in."""
1317 1317
1318 1318 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1319 1319 # don't touch the context to be merged in. 'cd' is skipped, because
1320 1320 # changed/deleted never resolves to something from the remote side.
1321 1321 files = mresult.files(
1322 1322 [
1323 1323 mergestatemod.ACTION_GET,
1324 1324 mergestatemod.ACTION_DELETED_CHANGED,
1325 1325 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1326 1326 mergestatemod.ACTION_MERGE,
1327 1327 ]
1328 1328 )
1329 1329
1330 1330 prefetch = scmutil.prefetchfiles
1331 1331 matchfiles = scmutil.matchfiles
1332 1332 prefetch(
1333 1333 repo, [(ctx.rev(), matchfiles(repo, files),)],
1334 1334 )
1335 1335
1336 1336
1337 1337 @attr.s(frozen=True)
1338 1338 class updateresult(object):
1339 1339 updatedcount = attr.ib()
1340 1340 mergedcount = attr.ib()
1341 1341 removedcount = attr.ib()
1342 1342 unresolvedcount = attr.ib()
1343 1343
1344 1344 def isempty(self):
1345 1345 return not (
1346 1346 self.updatedcount
1347 1347 or self.mergedcount
1348 1348 or self.removedcount
1349 1349 or self.unresolvedcount
1350 1350 )
1351 1351
1352 1352
1353 1353 def applyupdates(
1354 1354 repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None,
1355 1355 ):
1356 1356 """apply the merge action list to the working directory
1357 1357
1358 1358 mresult is a mergeresult object representing result of the merge
1359 1359 wctx is the working copy context
1360 1360 mctx is the context to be merged into the working copy
1361 1361
1362 1362 Return a tuple of (counts, filedata), where counts is a tuple
1363 1363 (updated, merged, removed, unresolved) that describes how many
1364 1364 files were affected by the update, and filedata is as described in
1365 1365 batchget.
1366 1366 """
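# Sketch of how the return value is typically consumed (compare the call in
# update() further down):
#
#   stats, getfiledata = applyupdates(
#       repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels
#   )
#   if stats.unresolvedcount:
#       ...  # conflicts are left in the mergestate for 'hg resolve'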
1367 1367
1368 1368 _prefetchfiles(repo, mctx, mresult)
1369 1369
1370 1370 updated, merged, removed = 0, 0, 0
1371 1371 ms = mergestatemod.mergestate.clean(
1372 1372 repo, wctx.p1().node(), mctx.node(), labels
1373 1373 )
1374 1374
1375 1375 for f, op in pycompat.iteritems(mresult.commitinfo):
1376 1376 # the filenode from the other side was chosen while merging; store this in
1377 1377 # mergestate so that it can be reused on commit
1378 1378 ms.addcommitinfo(f, op)
1379 1379
1380 1380 moves = []
1381 1381
1382 1382 # 'cd' and 'dc' actions are treated like other merge conflicts
1383 1383 mergeactions = list(
1384 1384 mresult.getactions(
1385 1385 [
1386 1386 mergestatemod.ACTION_CHANGED_DELETED,
1387 1387 mergestatemod.ACTION_DELETED_CHANGED,
1388 1388 mergestatemod.ACTION_MERGE,
1389 1389 ],
1390 1390 sort=True,
1391 1391 )
1392 1392 )
1393 1393 for f, args, msg in mergeactions:
1394 1394 f1, f2, fa, move, anc = args
1395 1395 if f == b'.hgsubstate': # merged internally
1396 1396 continue
1397 1397 if f1 is None:
1398 1398 fcl = filemerge.absentfilectx(wctx, fa)
1399 1399 else:
1400 1400 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1401 1401 fcl = wctx[f1]
1402 1402 if f2 is None:
1403 1403 fco = filemerge.absentfilectx(mctx, fa)
1404 1404 else:
1405 1405 fco = mctx[f2]
1406 1406 actx = repo[anc]
1407 1407 if fa in actx:
1408 1408 fca = actx[fa]
1409 1409 else:
1410 1410 # TODO: move to absentfilectx
1411 1411 fca = repo.filectx(f1, fileid=nullrev)
1412 1412 ms.add(fcl, fco, fca, f)
1413 1413 if f1 != f and move:
1414 1414 moves.append(f1)
1415 1415
1416 1416 # remove renamed files after safely stored
1417 1417 for f in moves:
1418 1418 if wctx[f].lexists():
1419 1419 repo.ui.debug(b"removing %s\n" % f)
1420 1420 wctx[f].audit()
1421 1421 wctx[f].remove()
1422 1422
1423 1423 numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
1424 1424 progress = repo.ui.makeprogress(
1425 1425 _(b'updating'), unit=_(b'files'), total=numupdates
1426 1426 )
1427 1427
1428 1428 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1429 1429 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1430 1430
1431 1431 # record path conflicts
1432 1432 for f, args, msg in mresult.getactions(
1433 1433 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1434 1434 ):
1435 1435 f1, fo = args
1436 1436 s = repo.ui.status
1437 1437 s(
1438 1438 _(
1439 1439 b"%s: path conflict - a file or link has the same name as a "
1440 1440 b"directory\n"
1441 1441 )
1442 1442 % f
1443 1443 )
1444 1444 if fo == b'l':
1445 1445 s(_(b"the local file has been renamed to %s\n") % f1)
1446 1446 else:
1447 1447 s(_(b"the remote file has been renamed to %s\n") % f1)
1448 1448 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1449 1449 ms.addpathconflict(f, f1, fo)
1450 1450 progress.increment(item=f)
1451 1451
1452 1452 # When merging in-memory, we can't support worker processes, so set the
1453 1453 # per-item cost at 0 in that case.
1454 1454 cost = 0 if wctx.isinmemory() else 0.001
1455 1455
1456 1456 # remove in parallel (must come before resolving path conflicts and getting)
1457 1457 prog = worker.worker(
1458 1458 repo.ui,
1459 1459 cost,
1460 1460 batchremove,
1461 1461 (repo, wctx),
1462 1462 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1463 1463 )
1464 1464 for i, item in prog:
1465 1465 progress.increment(step=i, item=item)
1466 1466 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1467 1467
1468 1468 # resolve path conflicts (must come before getting)
1469 1469 for f, args, msg in mresult.getactions(
1470 1470 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1471 1471 ):
1472 1472 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1473 1473 (f0, origf0) = args
1474 1474 if wctx[f0].lexists():
1475 1475 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1476 1476 wctx[f].audit()
1477 1477 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1478 1478 wctx[f0].remove()
1479 1479 progress.increment(item=f)
1480 1480
1481 1481 # get in parallel.
1482 1482 threadsafe = repo.ui.configbool(
1483 1483 b'experimental', b'worker.wdir-get-thread-safe'
1484 1484 )
1485 1485 prog = worker.worker(
1486 1486 repo.ui,
1487 1487 cost,
1488 1488 batchget,
1489 1489 (repo, mctx, wctx, wantfiledata),
1490 1490 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1491 1491 threadsafe=threadsafe,
1492 1492 hasretval=True,
1493 1493 )
1494 1494 getfiledata = {}
1495 1495 for final, res in prog:
1496 1496 if final:
1497 1497 getfiledata = res
1498 1498 else:
1499 1499 i, item = res
1500 1500 progress.increment(step=i, item=item)
1501 1501
1502 1502 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1503 1503 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1504 1504
1505 1505 # forget (manifest only, just log it) (must come first)
1506 1506 for f, args, msg in mresult.getactions(
1507 1507 (mergestatemod.ACTION_FORGET,), sort=True
1508 1508 ):
1509 1509 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1510 1510 progress.increment(item=f)
1511 1511
1512 1512 # re-add (manifest only, just log it)
1513 1513 for f, args, msg in mresult.getactions(
1514 1514 (mergestatemod.ACTION_ADD,), sort=True
1515 1515 ):
1516 1516 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1517 1517 progress.increment(item=f)
1518 1518
1519 1519 # re-add/mark as modified (manifest only, just log it)
1520 1520 for f, args, msg in mresult.getactions(
1521 1521 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1522 1522 ):
1523 1523 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1524 1524 progress.increment(item=f)
1525 1525
1526 1526 # keep (noop, just log it)
1527 1527 for f, args, msg in mresult.getactions(
1528 1528 (mergestatemod.ACTION_KEEP,), sort=True
1529 1529 ):
1530 1530 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1531 1531 # no progress
1532 1532
1533 1533 # directory rename, move local
1534 1534 for f, args, msg in mresult.getactions(
1535 1535 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1536 1536 ):
1537 1537 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1538 1538 progress.increment(item=f)
1539 1539 f0, flags = args
1540 1540 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1541 1541 wctx[f].audit()
1542 1542 wctx[f].write(wctx.filectx(f0).data(), flags)
1543 1543 wctx[f0].remove()
1544 1544
1545 1545 # local directory rename, get
1546 1546 for f, args, msg in mresult.getactions(
1547 1547 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1548 1548 ):
1549 1549 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1550 1550 progress.increment(item=f)
1551 1551 f0, flags = args
1552 1552 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1553 1553 wctx[f].write(mctx.filectx(f0).data(), flags)
1554 1554
1555 1555 # exec
1556 1556 for f, args, msg in mresult.getactions(
1557 1557 (mergestatemod.ACTION_EXEC,), sort=True
1558 1558 ):
1559 1559 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1560 1560 progress.increment(item=f)
1561 1561 (flags,) = args
1562 1562 wctx[f].audit()
1563 1563 wctx[f].setflags(b'l' in flags, b'x' in flags)
1564 1564
1565 1565 # these actions update the file
1566 1566 updated = mresult.len(
1567 1567 (
1568 1568 mergestatemod.ACTION_GET,
1569 1569 mergestatemod.ACTION_EXEC,
1570 1570 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1571 1571 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1572 1572 )
1573 1573 )
1574 1574 # the ordering is important here -- ms.mergedriver will raise if the merge
1575 1575 # driver has changed, and we want to be able to bypass it when overwrite is
1576 1576 # True
1577 1577 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1578 1578
1579 1579 if usemergedriver:
1580 1580 if wctx.isinmemory():
1581 1581 raise error.InMemoryMergeConflictsError(
1582 1582 b"in-memory merge does not support mergedriver"
1583 1583 )
1584 1584 ms.commit()
1585 1585 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1586 1586 # the driver might leave some files unresolved
1587 1587 unresolvedf = set(ms.unresolved())
1588 1588 if not proceed:
1589 1589 # XXX setting unresolved to at least 1 is a hack to make sure we
1590 1590 # error out
1591 1591 return updateresult(
1592 1592 updated, merged, removed, max(len(unresolvedf), 1)
1593 1593 )
1594 1594 newactions = []
1595 1595 for f, args, msg in mergeactions:
1596 1596 if f in unresolvedf:
1597 1597 newactions.append((f, args, msg))
1598 1598 mergeactions = newactions
1599 1599
1600 1600 try:
1601 1601 # premerge
1602 1602 tocomplete = []
1603 1603 for f, args, msg in mergeactions:
1604 1604 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1605 1605 progress.increment(item=f)
1606 1606 if f == b'.hgsubstate': # subrepo states need updating
1607 1607 subrepoutil.submerge(
1608 1608 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1609 1609 )
1610 1610 continue
1611 1611 wctx[f].audit()
1612 1612 complete, r = ms.preresolve(f, wctx)
1613 1613 if not complete:
1614 1614 numupdates += 1
1615 1615 tocomplete.append((f, args, msg))
1616 1616
1617 1617 # merge
1618 1618 for f, args, msg in tocomplete:
1619 1619 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1620 1620 progress.increment(item=f, total=numupdates)
1621 1621 ms.resolve(f, wctx)
1622 1622
1623 1623 finally:
1624 1624 ms.commit()
1625 1625
1626 1626 unresolved = ms.unresolvedcount()
1627 1627
1628 1628 if (
1629 1629 usemergedriver
1630 1630 and not unresolved
1631 1631 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1632 1632 ):
1633 1633 if not driverconclude(repo, ms, wctx, labels=labels):
1634 1634 # XXX setting unresolved to at least 1 is a hack to make sure we
1635 1635 # error out
1636 1636 unresolved = max(unresolved, 1)
1637 1637
1638 1638 ms.commit()
1639 1639
1640 1640 msupdated, msmerged, msremoved = ms.counts()
1641 1641 updated += msupdated
1642 1642 merged += msmerged
1643 1643 removed += msremoved
1644 1644
1645 1645 extraactions = ms.actions()
1646 1646 if extraactions:
1647 1647 mfiles = {
1648 1648 a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
1649 1649 }
1650 1650 for k, acts in pycompat.iteritems(extraactions):
1651 1651 for a in acts:
1652 1652 mresult.addfile(a[0], k, *a[1:])
1653 1653 if k == mergestatemod.ACTION_GET and wantfiledata:
1654 1654 # no filedata until mergestate is updated to provide it
1655 1655 for a in acts:
1656 1656 getfiledata[a[0]] = None
1657 1657 # Remove these files from actions[ACTION_MERGE] as well. This is
1658 1658 # important because in recordupdates, files in actions[ACTION_MERGE]
1659 1659 # are processed after files in other actions, and the merge driver
1660 1660 # might add files to those actions via extraactions above. This can
1661 1661 # lead to a file being recorded twice, with poor results. This is
1662 1662 # especially problematic for actions[ACTION_REMOVE] (currently only
1663 1663 # possible with the merge driver in the initial merge process;
1664 1664 # interrupted merges don't go through this flow).
1665 1665 #
1666 1666 # The real fix here is to have indexes by both file and action so
1667 1667 # that when the action for a file is changed it is automatically
1668 1668 # reflected in the other action lists. But that involves a more
1669 1669 # complex data structure, so this will do for now.
1670 1670 #
1671 1671 # We don't need to do the same operation for 'dc' and 'cd' because
1672 1672 # those lists aren't consulted again.
1673 1673 mfiles.difference_update(a[0] for a in acts)
1674 1674
1675 1675 for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
1676 1676 if a[0] not in mfiles:
1677 1677 mresult.removefile(a[0])
1678 1678
1679 1679 progress.complete()
1680 1680 assert len(getfiledata) == (
1681 1681 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1682 1682 )
1683 1683 return updateresult(updated, merged, removed, unresolved), getfiledata
1684 1684
1685 1685
1686 1686 def _advertisefsmonitor(repo, num_gets, p1node):
1687 1687 # Advertise fsmonitor when its presence could be useful.
1688 1688 #
1689 1689 # We only advertise when performing an update from an empty working
1690 1690 # directory. This typically only occurs during initial clone.
1691 1691 #
1692 1692 # We give users a mechanism to disable the warning in case it is
1693 1693 # annoying.
1694 1694 #
1695 1695 # We only allow on Linux and MacOS because that's where fsmonitor is
1696 1696 # considered stable.
1697 1697 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1698 1698 fsmonitorthreshold = repo.ui.configint(
1699 1699 b'fsmonitor', b'warn_update_file_count'
1700 1700 )
1701 # avoid cycle dirstate -> sparse -> merge -> dirstate
1702 from . import dirstate
1703
1704 if dirstate.rustmod is not None:
1705 # When using rust status, fsmonitor only becomes necessary at higher file counts
1706 fsmonitorthreshold = repo.ui.configint(
1707 b'fsmonitor', b'warn_update_file_count_rust',
1708 )
1709
1701 1710 try:
1702 1711 # avoid cycle: extensions -> cmdutil -> merge
1703 1712 from . import extensions
1704 1713
1705 1714 extensions.find(b'fsmonitor')
1706 1715 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1707 1716 # We intentionally don't look at whether fsmonitor has disabled
1708 1717 # itself because a) fsmonitor may have already printed a warning
1709 1718 # b) we only care about the config state here.
1710 1719 except KeyError:
1711 1720 fsmonitorenabled = False
1712 1721
1713 1722 if (
1714 1723 fsmonitorwarning
1715 1724 and not fsmonitorenabled
1716 1725 and p1node == nullid
1717 1726 and num_gets >= fsmonitorthreshold
1718 1727 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1719 1728 ):
1720 1729 repo.ui.warn(
1721 1730 _(
1722 1731 b'(warning: large working directory being used without '
1723 1732 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1724 1733 b'see "hg help -e fsmonitor")\n'
1725 1734 )
1726 1735 )
1727 1736
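# For reference, the configuration consulted above corresponds to an hgrc
# snippet like the following (the numbers are placeholders, not the shipped
# defaults):
#
#   [fsmonitor]
#   warn_when_unused = True
#   warn_update_file_count = 50000
#   warn_update_file_count_rust = 400000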
1728 1737
1729 1738 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1730 1739 UPDATECHECK_NONE = b'none'
1731 1740 UPDATECHECK_LINEAR = b'linear'
1732 1741 UPDATECHECK_NO_CONFLICT = b'noconflict'
1733 1742
1734 1743
1735 1744 def update(
1736 1745 repo,
1737 1746 node,
1738 1747 branchmerge,
1739 1748 force,
1740 1749 ancestor=None,
1741 1750 mergeancestor=False,
1742 1751 labels=None,
1743 1752 matcher=None,
1744 1753 mergeforce=False,
1745 1754 updatedirstate=True,
1746 1755 updatecheck=None,
1747 1756 wc=None,
1748 1757 ):
1749 1758 """
1750 1759 Perform a merge between the working directory and the given node
1751 1760
1752 1761 node = the node to update to
1753 1762 branchmerge = whether to merge between branches
1754 1763 force = whether to force branch merging or file overwriting
1755 1764 matcher = a matcher to filter file lists (dirstate not updated)
1756 1765 mergeancestor = whether it is merging with an ancestor. If true,
1757 1766 we should accept the incoming changes for any prompts that occur.
1758 1767 If false, merging with an ancestor (fast-forward) is only allowed
1759 1768 between different named branches. This flag is used by the rebase extension
1760 1769 as a temporary fix and should be avoided in general.
1761 1770 labels = labels to use for base, local and other
1762 1771 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1763 1772 this is True, then 'force' should be True as well.
1764 1773
1765 1774 The table below shows all the behaviors of the update command given the
1766 1775 -c/--check and -C/--clean or no options, whether the working directory is
1767 1776 dirty, whether a revision is specified, and the relationship of the parent
1768 1777 rev to the target rev (linear or not). Match from top first. The -n
1769 1778 option doesn't exist on the command line, but represents the
1770 1779 experimental.updatecheck=noconflict option.
1771 1780
1772 1781 This logic is tested by test-update-branches.t.
1773 1782
1774 1783 -c -C -n -m dirty rev linear | result
1775 1784 y y * * * * * | (1)
1776 1785 y * y * * * * | (1)
1777 1786 y * * y * * * | (1)
1778 1787 * y y * * * * | (1)
1779 1788 * y * y * * * | (1)
1780 1789 * * y y * * * | (1)
1781 1790 * * * * * n n | x
1782 1791 * * * * n * * | ok
1783 1792 n n n n y * y | merge
1784 1793 n n n n y y n | (2)
1785 1794 n n n y y * * | merge
1786 1795 n n y n y * * | merge if no conflict
1787 1796 n y n n y * * | discard
1788 1797 y n n n y * * | (3)
1789 1798
1790 1799 x = can't happen
1791 1800 * = don't-care
1792 1801 1 = incompatible options (checked in commands.py)
1793 1802 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1794 1803 3 = abort: uncommitted changes (checked in commands.py)
1795 1804
1796 1805 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1797 1806 to repo[None] if None is passed.
1798 1807
1799 1808 Return the same stats object (updateresult) as applyupdates().
1800 1809 """
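# Minimal usage sketch (a plain checkout of ``node``; see merge(),
# clean_update() and revert_to() below for the common wrappers):
#
#   stats = update(repo, node, branchmerge=False, force=False)
#   if stats.unresolvedcount:
#       ...  # some files need 'hg resolve'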
1801 1810 # Avoid cycle.
1802 1811 from . import sparse
1803 1812
1804 1813 # This function used to find the default destination if node was None, but
1805 1814 # that's now in destutil.py.
1806 1815 assert node is not None
1807 1816 if not branchmerge and not force:
1808 1817 # TODO: remove the default once all callers that pass branchmerge=False
1809 1818 # and force=False pass a value for updatecheck. We may want to allow
1810 1819 # updatecheck='abort' to better support some of these callers.
1811 1820 if updatecheck is None:
1812 1821 updatecheck = UPDATECHECK_LINEAR
1813 1822 if updatecheck not in (
1814 1823 UPDATECHECK_NONE,
1815 1824 UPDATECHECK_LINEAR,
1816 1825 UPDATECHECK_NO_CONFLICT,
1817 1826 ):
1818 1827 raise ValueError(
1819 1828 r'Invalid updatecheck %r (can accept %r)'
1820 1829 % (
1821 1830 updatecheck,
1822 1831 (
1823 1832 UPDATECHECK_NONE,
1824 1833 UPDATECHECK_LINEAR,
1825 1834 UPDATECHECK_NO_CONFLICT,
1826 1835 ),
1827 1836 )
1828 1837 )
1829 1838 if wc is not None and wc.isinmemory():
1830 1839 maybe_wlock = util.nullcontextmanager()
1831 1840 else:
1832 1841 maybe_wlock = repo.wlock()
1833 1842 with maybe_wlock:
1834 1843 if wc is None:
1835 1844 wc = repo[None]
1836 1845 pl = wc.parents()
1837 1846 p1 = pl[0]
1838 1847 p2 = repo[node]
1839 1848 if ancestor is not None:
1840 1849 pas = [repo[ancestor]]
1841 1850 else:
1842 1851 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1843 1852 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1844 1853 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1845 1854 else:
1846 1855 pas = [p1.ancestor(p2, warn=branchmerge)]
1847 1856
1848 1857 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1849 1858
1850 1859 overwrite = force and not branchmerge
1851 1860 ### check phase
1852 1861 if not overwrite:
1853 1862 if len(pl) > 1:
1854 1863 raise error.Abort(_(b"outstanding uncommitted merge"))
1855 1864 ms = mergestatemod.mergestate.read(repo)
1856 1865 if list(ms.unresolved()):
1857 1866 raise error.Abort(
1858 1867 _(b"outstanding merge conflicts"),
1859 1868 hint=_(b"use 'hg resolve' to resolve"),
1860 1869 )
1861 1870 if branchmerge:
1862 1871 if pas == [p2]:
1863 1872 raise error.Abort(
1864 1873 _(
1865 1874 b"merging with a working directory ancestor"
1866 1875 b" has no effect"
1867 1876 )
1868 1877 )
1869 1878 elif pas == [p1]:
1870 1879 if not mergeancestor and wc.branch() == p2.branch():
1871 1880 raise error.Abort(
1872 1881 _(b"nothing to merge"),
1873 1882 hint=_(b"use 'hg update' or check 'hg heads'"),
1874 1883 )
1875 1884 if not force and (wc.files() or wc.deleted()):
1876 1885 raise error.Abort(
1877 1886 _(b"uncommitted changes"),
1878 1887 hint=_(b"use 'hg status' to list changes"),
1879 1888 )
1880 1889 if not wc.isinmemory():
1881 1890 for s in sorted(wc.substate):
1882 1891 wc.sub(s).bailifchanged()
1883 1892
1884 1893 elif not overwrite:
1885 1894 if p1 == p2: # no-op update
1886 1895 # call the hooks and exit early
1887 1896 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1888 1897 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1889 1898 return updateresult(0, 0, 0, 0)
1890 1899
1891 1900 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1892 1901 [p1],
1893 1902 [p2],
1894 1903 ): # nonlinear
1895 1904 dirty = wc.dirty(missing=True)
1896 1905 if dirty:
1897 1906 # The branching is a bit convoluted, to ensure we make the minimal
1898 1907 # number of calls to obsutil.foreground.
1899 1908 foreground = obsutil.foreground(repo, [p1.node()])
1900 1909 # note: the <node> variable contains a random identifier
1901 1910 if repo[node].node() in foreground:
1902 1911 pass # allow updating to successors
1903 1912 else:
1904 1913 msg = _(b"uncommitted changes")
1905 1914 hint = _(b"commit or update --clean to discard changes")
1906 1915 raise error.UpdateAbort(msg, hint=hint)
1907 1916 else:
1908 1917 # Allow jumping branches if clean and specific rev given
1909 1918 pass
1910 1919
1911 1920 if overwrite:
1912 1921 pas = [wc]
1913 1922 elif not branchmerge:
1914 1923 pas = [p1]
1915 1924
1916 1925 # deprecated config: merge.followcopies
1917 1926 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1918 1927 if overwrite:
1919 1928 followcopies = False
1920 1929 elif not pas[0]:
1921 1930 followcopies = False
1922 1931 if not branchmerge and not wc.dirty(missing=True):
1923 1932 followcopies = False
1924 1933
1925 1934 ### calculate phase
1926 1935 mresult = calculateupdates(
1927 1936 repo,
1928 1937 wc,
1929 1938 p2,
1930 1939 pas,
1931 1940 branchmerge,
1932 1941 force,
1933 1942 mergeancestor,
1934 1943 followcopies,
1935 1944 matcher=matcher,
1936 1945 mergeforce=mergeforce,
1937 1946 )
1938 1947
1939 1948 if updatecheck == UPDATECHECK_NO_CONFLICT:
1940 1949 if mresult.hasconflicts():
1941 1950 msg = _(b"conflicting changes")
1942 1951 hint = _(b"commit or update --clean to discard changes")
1943 1952 raise error.Abort(msg, hint=hint)
1944 1953
1945 1954 # Prompt and create actions. Most of this is in the resolve phase
1946 1955 # already, but we can't handle .hgsubstate in filemerge or
1947 1956 # subrepoutil.submerge yet so we have to keep prompting for it.
1948 1957 vals = mresult.getfile(b'.hgsubstate')
1949 1958 if vals:
1950 1959 f = b'.hgsubstate'
1951 1960 m, args, msg = vals
1952 1961 prompts = filemerge.partextras(labels)
1953 1962 prompts[b'f'] = f
1954 1963 if m == mergestatemod.ACTION_CHANGED_DELETED:
1955 1964 if repo.ui.promptchoice(
1956 1965 _(
1957 1966 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1958 1967 b"use (c)hanged version or (d)elete?"
1959 1968 b"$$ &Changed $$ &Delete"
1960 1969 )
1961 1970 % prompts,
1962 1971 0,
1963 1972 ):
1964 1973 mresult.addfile(
1965 1974 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1966 1975 )
1967 1976 elif f in p1:
1968 1977 mresult.addfile(
1969 1978 f,
1970 1979 mergestatemod.ACTION_ADD_MODIFIED,
1971 1980 None,
1972 1981 b'prompt keep',
1973 1982 )
1974 1983 else:
1975 1984 mresult.addfile(
1976 1985 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1977 1986 )
1978 1987 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1979 1988 f1, f2, fa, move, anc = args
1980 1989 flags = p2[f2].flags()
1981 1990 if (
1982 1991 repo.ui.promptchoice(
1983 1992 _(
1984 1993 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1985 1994 b"use (c)hanged version or leave (d)eleted?"
1986 1995 b"$$ &Changed $$ &Deleted"
1987 1996 )
1988 1997 % prompts,
1989 1998 0,
1990 1999 )
1991 2000 == 0
1992 2001 ):
1993 2002 mresult.addfile(
1994 2003 f,
1995 2004 mergestatemod.ACTION_GET,
1996 2005 (flags, False),
1997 2006 b'prompt recreating',
1998 2007 )
1999 2008 else:
2000 2009 mresult.removefile(f)
2001 2010
2002 2011 if not util.fscasesensitive(repo.path):
2003 2012 # check collision between files only in p2 for clean update
2004 2013 if not branchmerge and (
2005 2014 force or not wc.dirty(missing=True, branch=False)
2006 2015 ):
2007 2016 _checkcollision(repo, p2.manifest(), None)
2008 2017 else:
2009 2018 _checkcollision(repo, wc.manifest(), mresult)
2010 2019
2011 2020 # divergent renames
2012 2021 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2013 2022 repo.ui.warn(
2014 2023 _(
2015 2024 b"note: possible conflict - %s was renamed "
2016 2025 b"multiple times to:\n"
2017 2026 )
2018 2027 % f
2019 2028 )
2020 2029 for nf in sorted(fl):
2021 2030 repo.ui.warn(b" %s\n" % nf)
2022 2031
2023 2032 # rename and delete
2024 2033 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2025 2034 repo.ui.warn(
2026 2035 _(
2027 2036 b"note: possible conflict - %s was deleted "
2028 2037 b"and renamed to:\n"
2029 2038 )
2030 2039 % f
2031 2040 )
2032 2041 for nf in sorted(fl):
2033 2042 repo.ui.warn(b" %s\n" % nf)
2034 2043
2035 2044 ### apply phase
2036 2045 if not branchmerge: # just jump to the new rev
2037 2046 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2038 2047 # If we're doing a partial update, we need to skip updating
2039 2048 # the dirstate.
2040 2049 always = matcher is None or matcher.always()
2041 2050 updatedirstate = updatedirstate and always and not wc.isinmemory()
2042 2051 if updatedirstate:
2043 2052 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2044 2053 # note that we're in the middle of an update
2045 2054 repo.vfs.write(b'updatestate', p2.hex())
2046 2055
2047 2056 _advertisefsmonitor(
2048 2057 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2049 2058 )
2050 2059
2051 2060 wantfiledata = updatedirstate and not branchmerge
2052 2061 stats, getfiledata = applyupdates(
2053 2062 repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels,
2054 2063 )
2055 2064
2056 2065 if updatedirstate:
2057 2066 with repo.dirstate.parentchange():
2058 2067 repo.setparents(fp1, fp2)
2059 2068 mergestatemod.recordupdates(
2060 2069 repo, mresult.actionsdict, branchmerge, getfiledata
2061 2070 )
2062 2071 # update completed, clear state
2063 2072 util.unlink(repo.vfs.join(b'updatestate'))
2064 2073
2065 2074 if not branchmerge:
2066 2075 repo.dirstate.setbranch(p2.branch())
2067 2076
2068 2077 # If we're updating to a location, clean up any stale temporary includes
2069 2078 # (ex: this happens during hg rebase --abort).
2070 2079 if not branchmerge:
2071 2080 sparse.prunetemporaryincludes(repo)
2072 2081
2073 2082 if updatedirstate:
2074 2083 repo.hook(
2075 2084 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2076 2085 )
2077 2086 return stats
2078 2087
2079 2088
2080 2089 def merge(ctx, labels=None, force=False, wc=None):
2081 2090 """Merge another topological branch into the working copy.
2082 2091
2083 2092 force = whether the merge was run with 'merge --force' (deprecated)
2084 2093 """
2085 2094
2086 2095 return update(
2087 2096 ctx.repo(),
2088 2097 ctx.rev(),
2089 2098 labels=labels,
2090 2099 branchmerge=True,
2091 2100 force=force,
2092 2101 mergeforce=force,
2093 2102 wc=wc,
2094 2103 )
2095 2104
2096 2105
2097 2106 def clean_update(ctx, wc=None):
2098 2107 """Do a clean update to the given commit.
2099 2108
2100 2109 This involves updating to the commit and discarding any changes in the
2101 2110 working copy.
2102 2111 """
2103 2112 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2104 2113
2105 2114
2106 2115 def revert_to(ctx, matcher=None, wc=None):
2107 2116 """Revert the working copy to the given commit.
2108 2117
2109 2118 The working copy will keep its current parent(s) but its content will
2110 2119 be the same as in the given commit.
2111 2120 """
2112 2121
2113 2122 return update(
2114 2123 ctx.repo(),
2115 2124 ctx.rev(),
2116 2125 branchmerge=False,
2117 2126 force=True,
2118 2127 updatedirstate=False,
2119 2128 matcher=matcher,
2120 2129 wc=wc,
2121 2130 )
2122 2131
2123 2132
2124 2133 def graft(
2125 2134 repo,
2126 2135 ctx,
2127 2136 base=None,
2128 2137 labels=None,
2129 2138 keepparent=False,
2130 2139 keepconflictparent=False,
2131 2140 wctx=None,
2132 2141 ):
2133 2142 """Do a graft-like merge.
2134 2143
2135 2144 This is a merge where the merge ancestor is chosen such that one
2136 2145 or more changesets are grafted onto the current changeset. In
2137 2146 addition to the merge, this fixes up the dirstate to include only
2138 2147 a single parent (if keepparent is False) and tries to duplicate any
2139 2148 renames/copies appropriately.
2140 2149
2141 2150 ctx - changeset to rebase
2142 2151 base - merge base, or ctx.p1() if not specified
2143 2152 labels - merge labels eg ['local', 'graft']
2144 2153 keepparent - keep second parent if any
2145 2154 keepconflictparent - if unresolved, keep parent used for the merge
2146 2155
2147 2156 """
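# Usage sketch (hypothetical changectx ``ctx`` grafted onto the current
# working copy parent):
#
#   stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])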
2148 2157 # If we're grafting a descendant onto an ancestor, be sure to pass
2149 2158 # mergeancestor=True to update. This does two things: 1) allows the merge if
2150 2159 # the destination is the same as the parent of the ctx (so we can use graft
2151 2160 # to copy commits), and 2) informs update that the incoming changes are
2152 2161 # newer than the destination so it doesn't prompt about "remote changed foo
2153 2162 # which local deleted".
2154 2163 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2155 2164 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2156 2165 wctx = wctx or repo[None]
2157 2166 pctx = wctx.p1()
2158 2167 base = base or ctx.p1()
2159 2168 mergeancestor = (
2160 2169 repo.changelog.isancestor(pctx.node(), ctx.node())
2161 2170 or pctx.rev() == base.rev()
2162 2171 )
2163 2172
2164 2173 stats = update(
2165 2174 repo,
2166 2175 ctx.node(),
2167 2176 True,
2168 2177 True,
2169 2178 base.node(),
2170 2179 mergeancestor=mergeancestor,
2171 2180 labels=labels,
2172 2181 wc=wctx,
2173 2182 )
2174 2183
2175 2184 if keepconflictparent and stats.unresolvedcount:
2176 2185 pother = ctx.node()
2177 2186 else:
2178 2187 pother = nullid
2179 2188 parents = ctx.parents()
2180 2189 if keepparent and len(parents) == 2 and base in parents:
2181 2190 parents.remove(base)
2182 2191 pother = parents[0].node()
2183 2192 # Never set both parents equal to each other
2184 2193 if pother == pctx.node():
2185 2194 pother = nullid
2186 2195
2187 2196 if wctx.isinmemory():
2188 2197 wctx.setparents(pctx.node(), pother)
2189 2198 # fix up dirstate for copies and renames
2190 2199 copies.graftcopies(wctx, ctx, base)
2191 2200 else:
2192 2201 with repo.dirstate.parentchange():
2193 2202 repo.setparents(pctx.node(), pother)
2194 2203 repo.dirstate.write(repo.currenttransaction())
2195 2204 # fix up dirstate for copies and renames
2196 2205 copies.graftcopies(wctx, ctx, base)
2197 2206 return stats
2198 2207
2199 2208
2200 2209 def purge(
2201 2210 repo,
2202 2211 matcher,
2203 2212 unknown=True,
2204 2213 ignored=False,
2205 2214 removeemptydirs=True,
2206 2215 removefiles=True,
2207 2216 abortonerror=False,
2208 2217 noop=False,
2209 2218 ):
2210 2219 """Purge the working directory of untracked files.
2211 2220
2212 2221 ``matcher`` is a matcher configured to scan the working directory -
2213 2222 potentially a subset.
2214 2223
2215 2224 ``unknown`` controls whether unknown files should be purged.
2216 2225
2217 2226 ``ignored`` controls whether ignored files should be purged.
2218 2227
2219 2228 ``removeemptydirs`` controls whether empty directories should be removed.
2220 2229
2221 2230 ``removefiles`` controls whether files are removed.
2222 2231
2223 2232 ``abortonerror`` causes an exception to be raised if an error occurs
2224 2233 deleting a file or directory.
2225 2234
2226 2235 ``noop`` controls whether removals are actually performed. If false (the
2227 2236 default), files are removed; if true, nothing is touched.
2228 2237
2229 2238 Returns an iterable of relative paths in the working directory that were
2230 2239 or would be removed.
2231 2240 """
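# Usage sketch (hypothetical caller, similar to what the purge extension
# does; ``noop=True`` only reports what would be removed):
#
#   m = scmutil.match(repo[None], [], {})
#   for path in purge(repo, m, ignored=False, noop=True):
#       repo.ui.write(b'%s\n' % path)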
2232 2241
2233 2242 def remove(removefn, path):
2234 2243 try:
2235 2244 removefn(path)
2236 2245 except OSError:
2237 2246 m = _(b'%s cannot be removed') % path
2238 2247 if abortonerror:
2239 2248 raise error.Abort(m)
2240 2249 else:
2241 2250 repo.ui.warn(_(b'warning: %s\n') % m)
2242 2251
2243 2252 # There's no API to copy a matcher. So mutate the passed matcher and
2244 2253 # restore it when we're done.
2245 2254 oldtraversedir = matcher.traversedir
2246 2255
2247 2256 res = []
2248 2257
2249 2258 try:
2250 2259 if removeemptydirs:
2251 2260 directories = []
2252 2261 matcher.traversedir = directories.append
2253 2262
2254 2263 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2255 2264
2256 2265 if removefiles:
2257 2266 for f in sorted(status.unknown + status.ignored):
2258 2267 if not noop:
2259 2268 repo.ui.note(_(b'removing file %s\n') % f)
2260 2269 remove(repo.wvfs.unlink, f)
2261 2270 res.append(f)
2262 2271
2263 2272 if removeemptydirs:
2264 2273 for f in sorted(directories, reverse=True):
2265 2274 if matcher(f) and not repo.wvfs.listdir(f):
2266 2275 if not noop:
2267 2276 repo.ui.note(_(b'removing directory %s\n') % f)
2268 2277 remove(repo.wvfs.rmdir, f)
2269 2278 res.append(f)
2270 2279
2271 2280 return res
2272 2281
2273 2282 finally:
2274 2283 matcher.traversedir = oldtraversedir
@@ -1,1308 +1,1309 b''
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 rbc-names-v1
47 47 rbc-revs-v1
48 48
49 49 Default operation:
50 50
51 51 $ hg clone . ../b
52 52 updating to branch default
53 53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 54 $ cd ../b
55 55
56 56 Ensure branchcache got copied over:
57 57
58 58 $ ls .hg/cache
59 59 branch2-served
60 60 rbc-names-v1
61 61 rbc-revs-v1
62 62
63 63 $ cat a
64 64 a
65 65 $ hg verify
66 66 checking changesets
67 67 checking manifests
68 68 crosschecking files in changesets and manifests
69 69 checking files
70 70 checked 11 changesets with 11 changes to 2 files
71 71
72 72 Invalid dest '' must abort:
73 73
74 74 $ hg clone . ''
75 75 abort: empty destination path is not valid
76 76 [255]
77 77
78 78 No update, with debug option:
79 79
80 80 #if hardlink
81 81 $ hg --debug clone -U . ../c --config progress.debug=true
82 82 linking: 1 files
83 83 linking: 2 files
84 84 linking: 3 files
85 85 linking: 4 files
86 86 linking: 5 files
87 87 linking: 6 files
88 88 linking: 7 files
89 89 linking: 8 files
90 90 linked 8 files (reporevlogstore !)
91 91 linking: 9 files (reposimplestore !)
92 92 linking: 10 files (reposimplestore !)
93 93 linking: 11 files (reposimplestore !)
94 94 linking: 12 files (reposimplestore !)
95 95 linking: 13 files (reposimplestore !)
96 96 linking: 14 files (reposimplestore !)
97 97 linking: 15 files (reposimplestore !)
98 98 linking: 16 files (reposimplestore !)
99 99 linking: 17 files (reposimplestore !)
100 100 linking: 18 files (reposimplestore !)
101 101 linked 18 files (reposimplestore !)
102 102 #else
103 103 $ hg --debug clone -U . ../c --config progress.debug=true
104 104 linking: 1 files
105 105 copying: 2 files
106 106 copying: 3 files
107 107 copying: 4 files
108 108 copying: 5 files
109 109 copying: 6 files
110 110 copying: 7 files
111 111 copying: 8 files
112 112 copied 8 files (reporevlogstore !)
113 113 copying: 9 files (reposimplestore !)
114 114 copying: 10 files (reposimplestore !)
115 115 copying: 11 files (reposimplestore !)
116 116 copying: 12 files (reposimplestore !)
117 117 copying: 13 files (reposimplestore !)
118 118 copying: 14 files (reposimplestore !)
119 119 copying: 15 files (reposimplestore !)
120 120 copying: 16 files (reposimplestore !)
121 121 copying: 17 files (reposimplestore !)
122 122 copying: 18 files (reposimplestore !)
123 123 copied 18 files (reposimplestore !)
124 124 #endif
125 125 $ cd ../c
126 126
127 127 Ensure branchcache got copied over:
128 128
129 129 $ ls .hg/cache
130 130 branch2-served
131 131 rbc-names-v1
132 132 rbc-revs-v1
133 133
134 134 $ cat a 2>/dev/null || echo "a not present"
135 135 a not present
136 136 $ hg verify
137 137 checking changesets
138 138 checking manifests
139 139 crosschecking files in changesets and manifests
140 140 checking files
141 141 checked 11 changesets with 11 changes to 2 files
142 142
143 143 Default destination:
144 144
145 145 $ mkdir ../d
146 146 $ cd ../d
147 147 $ hg clone ../a
148 148 destination directory: a
149 149 updating to branch default
150 150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 $ cd a
152 152 $ hg cat a
153 153 a
154 154 $ cd ../..
155 155
156 156 Check that we drop the 'file:' from the path before writing the .hgrc:
157 157
158 158 $ hg clone file:a e
159 159 updating to branch default
160 160 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 161 $ grep 'file:' e/.hg/hgrc
162 162 [1]
163 163
164 164 Check that path aliases are expanded:
165 165
166 166 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
167 167 $ hg -R f showconfig paths.default
168 168 $TESTTMP/a#0
169 169
170 170 Use --pull:
171 171
172 172 $ hg clone --pull a g
173 173 requesting all changes
174 174 adding changesets
175 175 adding manifests
176 176 adding file changes
177 177 added 11 changesets with 11 changes to 2 files
178 178 new changesets acb14030fe0a:a7949464abda
179 179 updating to branch default
180 180 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
181 181 $ hg -R g verify
182 182 checking changesets
183 183 checking manifests
184 184 crosschecking files in changesets and manifests
185 185 checking files
186 186 checked 11 changesets with 11 changes to 2 files
187 187
188 188 Invalid dest '' with --pull must abort (issue2528):
189 189
190 190 $ hg clone --pull a ''
191 191 abort: empty destination path is not valid
192 192 [255]
193 193
194 194 Clone to '.':
195 195
196 196 $ mkdir h
197 197 $ cd h
198 198 $ hg clone ../a .
199 199 updating to branch default
200 200 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 201 $ cd ..
202 202
203 203
204 204 *** Tests for option -u ***
205 205
206 206 Adding some more history to repo a:
207 207
208 208 $ cd a
209 209 $ hg tag ref1
210 210 $ echo the quick brown fox >a
211 211 $ hg ci -m "hacked default"
212 212 $ hg up ref1
213 213 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
214 214 $ hg branch stable
215 215 marked working directory as branch stable
216 216 (branches are permanent and global, did you want a bookmark?)
217 217 $ echo some text >a
218 218 $ hg ci -m "starting branch stable"
219 219 $ hg tag ref2
220 220 $ echo some more text >a
221 221 $ hg ci -m "another change for branch stable"
222 222 $ hg up ref2
223 223 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
224 224 $ hg parents
225 225 changeset: 13:e8ece76546a6
226 226 branch: stable
227 227 tag: ref2
228 228 parent: 10:a7949464abda
229 229 user: test
230 230 date: Thu Jan 01 00:00:00 1970 +0000
231 231 summary: starting branch stable
232 232
233 233
234 234 Repo a has two heads:
235 235
236 236 $ hg heads
237 237 changeset: 15:0aae7cf88f0d
238 238 branch: stable
239 239 tag: tip
240 240 user: test
241 241 date: Thu Jan 01 00:00:00 1970 +0000
242 242 summary: another change for branch stable
243 243
244 244 changeset: 12:f21241060d6a
245 245 user: test
246 246 date: Thu Jan 01 00:00:00 1970 +0000
247 247 summary: hacked default
248 248
249 249
250 250 $ cd ..
251 251
252 252
253 253 Testing --noupdate with --updaterev (must abort):
254 254
255 255 $ hg clone --noupdate --updaterev 1 a ua
256 256 abort: cannot specify both --noupdate and --updaterev
257 257 [255]
258 258
259 259
260 260 Testing clone -u:
261 261
262 262 $ hg clone -u . a ua
263 263 updating to branch stable
264 264 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 265
266 266 Repo ua has both heads:
267 267
268 268 $ hg -R ua heads
269 269 changeset: 15:0aae7cf88f0d
270 270 branch: stable
271 271 tag: tip
272 272 user: test
273 273 date: Thu Jan 01 00:00:00 1970 +0000
274 274 summary: another change for branch stable
275 275
276 276 changeset: 12:f21241060d6a
277 277 user: test
278 278 date: Thu Jan 01 00:00:00 1970 +0000
279 279 summary: hacked default
280 280
281 281
282 282 Same revision checked out in repo a and ua:
283 283
284 284 $ hg -R a parents --template "{node|short}\n"
285 285 e8ece76546a6
286 286 $ hg -R ua parents --template "{node|short}\n"
287 287 e8ece76546a6
288 288
289 289 $ rm -r ua
290 290
291 291
292 292 Testing clone --pull -u:
293 293
294 294 $ hg clone --pull -u . a ua
295 295 requesting all changes
296 296 adding changesets
297 297 adding manifests
298 298 adding file changes
299 299 added 16 changesets with 16 changes to 3 files (+1 heads)
300 300 new changesets acb14030fe0a:0aae7cf88f0d
301 301 updating to branch stable
302 302 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
303 303
304 304 Repo ua has both heads:
305 305
306 306 $ hg -R ua heads
307 307 changeset: 15:0aae7cf88f0d
308 308 branch: stable
309 309 tag: tip
310 310 user: test
311 311 date: Thu Jan 01 00:00:00 1970 +0000
312 312 summary: another change for branch stable
313 313
314 314 changeset: 12:f21241060d6a
315 315 user: test
316 316 date: Thu Jan 01 00:00:00 1970 +0000
317 317 summary: hacked default
318 318
319 319
320 320 Same revision checked out in repo a and ua:
321 321
322 322 $ hg -R a parents --template "{node|short}\n"
323 323 e8ece76546a6
324 324 $ hg -R ua parents --template "{node|short}\n"
325 325 e8ece76546a6
326 326
327 327 $ rm -r ua
328 328
329 329
330 330 Testing clone -u <branch>:
331 331
332 332 $ hg clone -u stable a ua
333 333 updating to branch stable
334 334 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
335 335
336 336 Repo ua has both heads:
337 337
338 338 $ hg -R ua heads
339 339 changeset: 15:0aae7cf88f0d
340 340 branch: stable
341 341 tag: tip
342 342 user: test
343 343 date: Thu Jan 01 00:00:00 1970 +0000
344 344 summary: another change for branch stable
345 345
346 346 changeset: 12:f21241060d6a
347 347 user: test
348 348 date: Thu Jan 01 00:00:00 1970 +0000
349 349 summary: hacked default
350 350
351 351
352 352 Branch 'stable' is checked out:
353 353
354 354 $ hg -R ua parents
355 355 changeset: 15:0aae7cf88f0d
356 356 branch: stable
357 357 tag: tip
358 358 user: test
359 359 date: Thu Jan 01 00:00:00 1970 +0000
360 360 summary: another change for branch stable
361 361
362 362
363 363 $ rm -r ua
364 364
365 365
366 366 Testing default checkout:
367 367
368 368 $ hg clone a ua
369 369 updating to branch default
370 370 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
371 371
372 372 Repo ua has both heads:
373 373
374 374 $ hg -R ua heads
375 375 changeset: 15:0aae7cf88f0d
376 376 branch: stable
377 377 tag: tip
378 378 user: test
379 379 date: Thu Jan 01 00:00:00 1970 +0000
380 380 summary: another change for branch stable
381 381
382 382 changeset: 12:f21241060d6a
383 383 user: test
384 384 date: Thu Jan 01 00:00:00 1970 +0000
385 385 summary: hacked default
386 386
387 387
388 388 Branch 'default' is checked out:
389 389
390 390 $ hg -R ua parents
391 391 changeset: 12:f21241060d6a
392 392 user: test
393 393 date: Thu Jan 01 00:00:00 1970 +0000
394 394 summary: hacked default
395 395
396 396 Test clone with a branch named "@" (issue3677)
397 397
398 398 $ hg -R ua branch @
399 399 marked working directory as branch @
400 400 $ hg -R ua commit -m 'created branch @'
401 401 $ hg clone ua atbranch
402 402 updating to branch default
403 403 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 404 $ hg -R atbranch heads
405 405 changeset: 16:798b6d97153e
406 406 branch: @
407 407 tag: tip
408 408 parent: 12:f21241060d6a
409 409 user: test
410 410 date: Thu Jan 01 00:00:00 1970 +0000
411 411 summary: created branch @
412 412
413 413 changeset: 15:0aae7cf88f0d
414 414 branch: stable
415 415 user: test
416 416 date: Thu Jan 01 00:00:00 1970 +0000
417 417 summary: another change for branch stable
418 418
419 419 changeset: 12:f21241060d6a
420 420 user: test
421 421 date: Thu Jan 01 00:00:00 1970 +0000
422 422 summary: hacked default
423 423
424 424 $ hg -R atbranch parents
425 425 changeset: 12:f21241060d6a
426 426 user: test
427 427 date: Thu Jan 01 00:00:00 1970 +0000
428 428 summary: hacked default
429 429
430 430
431 431 $ rm -r ua atbranch
432 432
433 433
434 434 Testing #<branch>:
435 435
436 436 $ hg clone -u . a#stable ua
437 437 adding changesets
438 438 adding manifests
439 439 adding file changes
440 440 added 14 changesets with 14 changes to 3 files
441 441 new changesets acb14030fe0a:0aae7cf88f0d
442 442 updating to branch stable
443 443 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
444 444
445 445 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
446 446
447 447 $ hg -R ua heads
448 448 changeset: 13:0aae7cf88f0d
449 449 branch: stable
450 450 tag: tip
451 451 user: test
452 452 date: Thu Jan 01 00:00:00 1970 +0000
453 453 summary: another change for branch stable
454 454
455 455 changeset: 10:a7949464abda
456 456 user: test
457 457 date: Thu Jan 01 00:00:00 1970 +0000
458 458 summary: test
459 459
460 460
461 461 Same revision checked out in repo a and ua:
462 462
463 463 $ hg -R a parents --template "{node|short}\n"
464 464 e8ece76546a6
465 465 $ hg -R ua parents --template "{node|short}\n"
466 466 e8ece76546a6
467 467
468 468 $ rm -r ua
469 469
470 470
471 471 Testing -u -r <branch>:
472 472
473 473 $ hg clone -u . -r stable a ua
474 474 adding changesets
475 475 adding manifests
476 476 adding file changes
477 477 added 14 changesets with 14 changes to 3 files
478 478 new changesets acb14030fe0a:0aae7cf88f0d
479 479 updating to branch stable
480 480 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
481 481
482 482 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
483 483
484 484 $ hg -R ua heads
485 485 changeset: 13:0aae7cf88f0d
486 486 branch: stable
487 487 tag: tip
488 488 user: test
489 489 date: Thu Jan 01 00:00:00 1970 +0000
490 490 summary: another change for branch stable
491 491
492 492 changeset: 10:a7949464abda
493 493 user: test
494 494 date: Thu Jan 01 00:00:00 1970 +0000
495 495 summary: test
496 496
497 497
498 498 Same revision checked out in repo a and ua:
499 499
500 500 $ hg -R a parents --template "{node|short}\n"
501 501 e8ece76546a6
502 502 $ hg -R ua parents --template "{node|short}\n"
503 503 e8ece76546a6
504 504
505 505 $ rm -r ua
506 506
507 507
508 508 Testing -r <branch>:
509 509
510 510 $ hg clone -r stable a ua
511 511 adding changesets
512 512 adding manifests
513 513 adding file changes
514 514 added 14 changesets with 14 changes to 3 files
515 515 new changesets acb14030fe0a:0aae7cf88f0d
516 516 updating to branch stable
517 517 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
518 518
519 519 Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
520 520
521 521 $ hg -R ua heads
522 522 changeset: 13:0aae7cf88f0d
523 523 branch: stable
524 524 tag: tip
525 525 user: test
526 526 date: Thu Jan 01 00:00:00 1970 +0000
527 527 summary: another change for branch stable
528 528
529 529 changeset: 10:a7949464abda
530 530 user: test
531 531 date: Thu Jan 01 00:00:00 1970 +0000
532 532 summary: test
533 533
534 534
535 535 Branch 'stable' is checked out:
536 536
537 537 $ hg -R ua parents
538 538 changeset: 13:0aae7cf88f0d
539 539 branch: stable
540 540 tag: tip
541 541 user: test
542 542 date: Thu Jan 01 00:00:00 1970 +0000
543 543 summary: another change for branch stable
544 544
545 545
546 546 $ rm -r ua
547 547
548 548
549 549 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
550 550 iterable in addbranchrevs()
551 551
552 552 $ cat <<EOF > simpleclone.py
553 553 > from mercurial import hg, ui as uimod
554 554 > myui = uimod.ui.load()
555 555 > repo = hg.repository(myui, b'a')
556 556 > hg.clone(myui, {}, repo, dest=b"ua")
557 557 > EOF
558 558
559 559 $ "$PYTHON" simpleclone.py
560 560 updating to branch default
561 561 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
562 562
563 563 $ rm -r ua
564 564
565 565 $ cat <<EOF > branchclone.py
566 566 > from mercurial import extensions, hg, ui as uimod
567 567 > myui = uimod.ui.load()
568 568 > extensions.loadall(myui)
569 569 > extensions.populateui(myui)
570 570 > repo = hg.repository(myui, b'a')
571 571 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
572 572 > EOF
573 573
574 574 $ "$PYTHON" branchclone.py
575 575 adding changesets
576 576 adding manifests
577 577 adding file changes
578 578 added 14 changesets with 14 changes to 3 files
579 579 new changesets acb14030fe0a:0aae7cf88f0d
580 580 updating to branch stable
581 581 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
582 582 $ rm -r ua
583 583
584 584
585 585 Test clone with special '@' bookmark:
586 586 $ cd a
587 587 $ hg bookmark -r a7949464abda @ # branch point of stable from default
588 588 $ hg clone . ../i
589 589 updating to bookmark @
590 590 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
591 591 $ hg id -i ../i
592 592 a7949464abda
593 593 $ rm -r ../i
594 594
595 595 $ hg bookmark -f -r stable @
596 596 $ hg bookmarks
597 597 @ 15:0aae7cf88f0d
598 598 $ hg clone . ../i
599 599 updating to bookmark @ on branch stable
600 600 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
601 601 $ hg id -i ../i
602 602 0aae7cf88f0d
603 603 $ cd "$TESTTMP"
604 604
605 605
606 606 Testing failures:
607 607
608 608 $ mkdir fail
609 609 $ cd fail
610 610
611 611 No local source
612 612
613 613 $ hg clone a b
614 614 abort: repository a not found!
615 615 [255]
616 616
617 617 Invalid URL
618 618
619 619 $ hg clone http://invalid:url/a b
620 620 abort: error: nonnumeric port: 'url'
621 621 [255]
622 622
623 623 No remote source
624 624
625 625 #if windows
626 626 $ hg clone http://$LOCALIP:3121/a b
627 627 abort: error: * (glob)
628 628 [255]
629 629 #else
630 630 $ hg clone http://$LOCALIP:3121/a b
631 631 abort: error: *refused* (glob)
632 632 [255]
633 633 #endif
634 634 $ rm -rf b # work around bug with http clone
635 635
636 636
637 637 #if unix-permissions no-root
638 638
639 639 Inaccessible source
640 640
641 641 $ mkdir a
642 642 $ chmod 000 a
643 643 $ hg clone a b
644 644 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
645 645 [255]
646 646
647 647 Inaccessible destination
648 648
649 649 $ hg init b
650 650 $ cd b
651 651 $ hg clone . ../a
652 652 abort: Permission denied: *../a* (glob)
653 653 [255]
654 654 $ cd ..
655 655 $ chmod 700 a
656 656 $ rm -r a b
657 657
658 658 #endif
659 659
660 660
661 661 #if fifo
662 662
663 663 Source of wrong type
664 664
665 665 $ mkfifo a
666 666 $ hg clone a b
667 667 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
668 668 [255]
669 669 $ rm a
670 670
671 671 #endif
672 672
673 673 Default destination, same directory
674 674
675 675 $ hg init q
676 676 $ hg clone q
677 677 destination directory: q
678 678 abort: destination 'q' is not empty
679 679 [255]
680 680
681 681 destination directory not empty
682 682
683 683 $ mkdir a
684 684 $ echo stuff > a/a
685 685 $ hg clone q a
686 686 abort: destination 'a' is not empty
687 687 [255]
688 688
689 689
690 690 #if unix-permissions no-root
691 691
692 692 leave existing directory in place after clone failure
693 693
694 694 $ hg init c
695 695 $ cd c
696 696 $ echo c > c
697 697 $ hg commit -A -m test
698 698 adding c
699 699 $ chmod -rx .hg/store/data
700 700 $ cd ..
701 701 $ mkdir d
702 702 $ hg clone c d 2> err
703 703 [255]
704 704 $ test -d d
705 705 $ test -d d/.hg
706 706 [1]
707 707
708 708 re-enable perm to allow deletion
709 709
710 710 $ chmod +rx c/.hg/store/data
711 711
712 712 #endif
713 713
714 714 $ cd ..
715 715
716 716 Test clone from a repository in (emulated) revlog format 0 (issue4203); a hand-made .hg directory with no 'requires' file is treated as the old format-0 store:
717 717
718 718 $ mkdir issue4203
719 719 $ mkdir -p src/.hg
720 720 $ echo foo > src/foo
721 721 $ hg -R src add src/foo
722 722 $ hg -R src commit -m '#0'
723 723 $ hg -R src log -q
724 724 0:e1bab28bca43
725 725 $ hg -R src debugrevlog -c | egrep 'format|flags'
726 726 format : 0
727 727 flags : (none)
728 728 $ hg root -R src -T json | sed 's|\\\\|\\|g'
729 729 [
730 730 {
731 731 "hgpath": "$TESTTMP/src/.hg",
732 732 "reporoot": "$TESTTMP/src",
733 733 "storepath": "$TESTTMP/src/.hg"
734 734 }
735 735 ]
736 736 $ hg clone -U -q src dst
737 737 $ hg -R dst log -q
738 738 0:e1bab28bca43
739 739
740 740 Create repositories to test auto sharing functionality
741 741
742 742 $ cat >> $HGRCPATH << EOF
743 743 > [extensions]
744 744 > share=
745 745 > EOF
746 746
747 747 $ hg init empty
748 748 $ hg init source1a
749 749 $ cd source1a
750 750 $ echo initial1 > foo
751 751 $ hg -q commit -A -m initial
752 752 $ echo second > foo
753 753 $ hg commit -m second
754 754 $ cd ..
755 755
756 756 $ hg init filteredrev0
757 757 $ cd filteredrev0
758 758 $ cat >> .hg/hgrc << EOF
759 759 > [experimental]
760 760 > evolution.createmarkers=True
761 761 > EOF
762 762 $ echo initial1 > foo
763 763 $ hg -q commit -A -m initial0
764 764 $ hg -q up -r null
765 765 $ echo initial2 > foo
766 766 $ hg -q commit -A -m initial1
767 767 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
768 768 1 new obsolescence markers
769 769 obsoleted 1 changesets
770 770 $ cd ..
771 771
772 772 $ hg -q clone --pull source1a source1b
773 773 $ cd source1a
774 774 $ hg bookmark bookA
775 775 $ echo 1a > foo
776 776 $ hg commit -m 1a
777 777 $ cd ../source1b
778 778 $ hg -q up -r 0
779 779 $ echo head1 > foo
780 780 $ hg commit -m head1
781 781 created new head
782 782 $ hg bookmark head1
783 783 $ hg -q up -r 0
784 784 $ echo head2 > foo
785 785 $ hg commit -m head2
786 786 created new head
787 787 $ hg bookmark head2
788 788 $ hg -q up -r 0
789 789 $ hg branch branch1
790 790 marked working directory as branch branch1
791 791 (branches are permanent and global, did you want a bookmark?)
792 792 $ echo branch1 > foo
793 793 $ hg commit -m branch1
794 794 $ hg -q up -r 0
795 795 $ hg branch branch2
796 796 marked working directory as branch branch2
797 797 $ echo branch2 > foo
798 798 $ hg commit -m branch2
799 799 $ cd ..
800 800 $ hg init source2
801 801 $ cd source2
802 802 $ echo initial2 > foo
803 803 $ hg -q commit -A -m initial2
804 804 $ echo second > foo
805 805 $ hg commit -m second
806 806 $ cd ..
807 807
808 808 Clone with auto share from an empty repo should not result in share
809 809
810 810 $ mkdir share
811 811 $ hg --config share.pool=share clone empty share-empty
812 812 (not using pooled storage: remote appears to be empty)
813 813 updating to branch default
814 814 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
815 815 $ ls share
816 816 $ test -d share-empty/.hg/store
817 817 $ test -f share-empty/.hg/sharedpath
818 818 [1]
819 819
820 820 Clone with auto share from a repo with filtered revision 0 should not result in share
821 821
822 822 $ hg --config share.pool=share clone filteredrev0 share-filtered
823 823 (not using pooled storage: unable to resolve identity of remote)
824 824 requesting all changes
825 825 adding changesets
826 826 adding manifests
827 827 adding file changes
828 828 added 1 changesets with 1 changes to 1 files
829 829 new changesets e082c1832e09
830 830 updating to branch default
831 831 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
832 832
833 833 Clone from repo with content should result in shared store being created
834 834
835 835 $ hg --config share.pool=share clone source1a share-dest1a
836 836 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
837 837 requesting all changes
838 838 adding changesets
839 839 adding manifests
840 840 adding file changes
841 841 added 3 changesets with 3 changes to 1 files
842 842 new changesets b5f04eac9d8f:e5bfe23c0b47
843 843 searching for changes
844 844 no changes found
845 845 adding remote bookmark bookA
846 846 updating working directory
847 847 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
848 848
849 849 The shared repo should have been created
850 850
851 851 $ ls share
852 852 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
853 853
854 854 The destination should point to it
855 855
856 856 $ cat share-dest1a/.hg/sharedpath; echo
857 857 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
858 858
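As an aside, the 40-character directory name in the pool is not arbitrary: with the default pool-naming mode (the clones above do not set share.poolnaming), the pooled repository is named after the full node of the source's root changeset, and b5f04eac9d8f is indeed revision 0 in the log output above. A minimal way to confirm that relationship, in the style of the embedded scripts used elsewhere in this test file (illustrative only, not part of the recorded test output):

    # poolcheck.py -- illustrative sketch, not part of the test suite
    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()
    repo = hg.repository(myui, b'source1a')
    # The full hex node of revision 0 should match the pool directory name,
    # i.e. b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 in the output above.
    print(repo[0].hex().decode('ascii'))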
859 859 The destination should have bookmarks
860 860
861 861 $ hg -R share-dest1a bookmarks
862 862 bookA 2:e5bfe23c0b47
863 863
864 864 The default path should be the remote, not the share
865 865
866 866 $ hg -R share-dest1a config paths.default
867 867 $TESTTMP/source1a
868 868
869 869 Clone with existing share dir should result in pull + share
870 870
871 871 $ hg --config share.pool=share clone source1b share-dest1b
872 872 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
873 873 searching for changes
874 874 adding changesets
875 875 adding manifests
876 876 adding file changes
877 877 adding remote bookmark head1
878 878 adding remote bookmark head2
879 879 added 4 changesets with 4 changes to 1 files (+4 heads)
880 880 new changesets 4a8dc1ab4c13:6bacf4683960
881 881 updating working directory
882 882 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
883 883
884 884 $ ls share
885 885 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
886 886
887 887 $ cat share-dest1b/.hg/sharedpath; echo
888 888 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
889 889
890 890 We only get bookmarks from the remote, not everything in the share
891 891
892 892 $ hg -R share-dest1b bookmarks
893 893 head1 3:4a8dc1ab4c13
894 894 head2 4:99f71071f117
895 895
896 896 Default path should be source, not share.
897 897
898 898 $ hg -R share-dest1b config paths.default
899 899 $TESTTMP/source1b
900 900
901 901 Checked out revision should be head of default branch
902 902
903 903 $ hg -R share-dest1b log -r .
904 904 changeset: 4:99f71071f117
905 905 bookmark: head2
906 906 parent: 0:b5f04eac9d8f
907 907 user: test
908 908 date: Thu Jan 01 00:00:00 1970 +0000
909 909 summary: head2
910 910
911 911
912 912 Clone from unrelated repo should result in new share
913 913
914 914 $ hg --config share.pool=share clone source2 share-dest2
915 915 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
916 916 requesting all changes
917 917 adding changesets
918 918 adding manifests
919 919 adding file changes
920 920 added 2 changesets with 2 changes to 1 files
921 921 new changesets 22aeff664783:63cf6c3dba4a
922 922 searching for changes
923 923 no changes found
924 924 updating working directory
925 925 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
926 926
927 927 $ ls share
928 928 22aeff664783fd44c6d9b435618173c118c3448e
929 929 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
930 930
931 931 remote naming mode works as advertised
932 932
933 933 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
934 934 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
935 935 requesting all changes
936 936 adding changesets
937 937 adding manifests
938 938 adding file changes
939 939 added 3 changesets with 3 changes to 1 files
940 940 new changesets b5f04eac9d8f:e5bfe23c0b47
941 941 searching for changes
942 942 no changes found
943 943 adding remote bookmark bookA
944 944 updating working directory
945 945 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
946 946
947 947 $ ls shareremote
948 948 195bb1fcdb595c14a6c13e0269129ed78f6debde
949 949
950 950 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
951 951 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
952 952 requesting all changes
953 953 adding changesets
954 954 adding manifests
955 955 adding file changes
956 956 added 6 changesets with 6 changes to 1 files (+4 heads)
957 957 new changesets b5f04eac9d8f:6bacf4683960
958 958 searching for changes
959 959 no changes found
960 960 adding remote bookmark head1
961 961 adding remote bookmark head2
962 962 updating working directory
963 963 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
964 964
965 965 $ ls shareremote
966 966 195bb1fcdb595c14a6c13e0269129ed78f6debde
967 967 c0d4f83847ca2a873741feb7048a45085fd47c46
968 968
969 969 request to clone a single revision is respected in sharing mode
970 970
971 971 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
972 972 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
973 973 adding changesets
974 974 adding manifests
975 975 adding file changes
976 976 added 2 changesets with 2 changes to 1 files
977 977 new changesets b5f04eac9d8f:4a8dc1ab4c13
978 978 no changes found
979 979 adding remote bookmark head1
980 980 updating working directory
981 981 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
982 982
983 983 $ hg -R share-1arev log -G
984 984 @ changeset: 1:4a8dc1ab4c13
985 985 | bookmark: head1
986 986 | tag: tip
987 987 | user: test
988 988 | date: Thu Jan 01 00:00:00 1970 +0000
989 989 | summary: head1
990 990 |
991 991 o changeset: 0:b5f04eac9d8f
992 992 user: test
993 993 date: Thu Jan 01 00:00:00 1970 +0000
994 994 summary: initial
995 995
996 996
997 997 making another clone should only pull down requested rev
998 998
999 999 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
1000 1000 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1001 1001 searching for changes
1002 1002 adding changesets
1003 1003 adding manifests
1004 1004 adding file changes
1005 1005 adding remote bookmark head1
1006 1006 adding remote bookmark head2
1007 1007 added 1 changesets with 1 changes to 1 files (+1 heads)
1008 1008 new changesets 99f71071f117
1009 1009 updating working directory
1010 1010 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1011 1011
1012 1012 $ hg -R share-1brev log -G
1013 1013 @ changeset: 2:99f71071f117
1014 1014 | bookmark: head2
1015 1015 | tag: tip
1016 1016 | parent: 0:b5f04eac9d8f
1017 1017 | user: test
1018 1018 | date: Thu Jan 01 00:00:00 1970 +0000
1019 1019 | summary: head2
1020 1020 |
1021 1021 | o changeset: 1:4a8dc1ab4c13
1022 1022 |/ bookmark: head1
1023 1023 | user: test
1024 1024 | date: Thu Jan 01 00:00:00 1970 +0000
1025 1025 | summary: head1
1026 1026 |
1027 1027 o changeset: 0:b5f04eac9d8f
1028 1028 user: test
1029 1029 date: Thu Jan 01 00:00:00 1970 +0000
1030 1030 summary: initial
1031 1031
1032 1032
1033 1033 Request to clone a single branch is respected in sharing mode
1034 1034
1035 1035 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1036 1036 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1037 1037 adding changesets
1038 1038 adding manifests
1039 1039 adding file changes
1040 1040 added 2 changesets with 2 changes to 1 files
1041 1041 new changesets b5f04eac9d8f:5f92a6c1a1b1
1042 1042 no changes found
1043 1043 updating working directory
1044 1044 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1045 1045
1046 1046 $ hg -R share-1bbranch1 log -G
1047 1047 o changeset: 1:5f92a6c1a1b1
1048 1048 | branch: branch1
1049 1049 | tag: tip
1050 1050 | user: test
1051 1051 | date: Thu Jan 01 00:00:00 1970 +0000
1052 1052 | summary: branch1
1053 1053 |
1054 1054 @ changeset: 0:b5f04eac9d8f
1055 1055 user: test
1056 1056 date: Thu Jan 01 00:00:00 1970 +0000
1057 1057 summary: initial
1058 1058
1059 1059
1060 1060 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1061 1061 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1062 1062 searching for changes
1063 1063 adding changesets
1064 1064 adding manifests
1065 1065 adding file changes
1066 1066 added 1 changesets with 1 changes to 1 files (+1 heads)
1067 1067 new changesets 6bacf4683960
1068 1068 updating working directory
1069 1069 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1070 1070
1071 1071 $ hg -R share-1bbranch2 log -G
1072 1072 o changeset: 2:6bacf4683960
1073 1073 | branch: branch2
1074 1074 | tag: tip
1075 1075 | parent: 0:b5f04eac9d8f
1076 1076 | user: test
1077 1077 | date: Thu Jan 01 00:00:00 1970 +0000
1078 1078 | summary: branch2
1079 1079 |
1080 1080 | o changeset: 1:5f92a6c1a1b1
1081 1081 |/ branch: branch1
1082 1082 | user: test
1083 1083 | date: Thu Jan 01 00:00:00 1970 +0000
1084 1084 | summary: branch1
1085 1085 |
1086 1086 @ changeset: 0:b5f04eac9d8f
1087 1087 user: test
1088 1088 date: Thu Jan 01 00:00:00 1970 +0000
1089 1089 summary: initial
1090 1090
1091 1091
1092 1092 -U is respected in share clone mode
1093 1093
1094 1094 $ hg --config share.pool=share clone -U source1a share-1anowc
1095 1095 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1096 1096 searching for changes
1097 1097 no changes found
1098 1098 adding remote bookmark bookA
1099 1099
1100 1100 $ ls -A share-1anowc
1101 1101 .hg
1102 1102
1103 1103 Test that auto sharing doesn't cause failure of "hg clone local remote"
1104 1104
1105 1105 $ cd $TESTTMP
1106 1106 $ hg -R a id -r 0
1107 1107 acb14030fe0a
1108 1108 $ hg id -R remote -r 0
1109 1109 abort: repository remote not found!
1110 1110 [255]
1111 1111 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1112 1112 $ hg -R remote id -r 0
1113 1113 acb14030fe0a
1114 1114
1115 1115 Cloning into pooled storage doesn't race (issue5104)
1116 1116
1117 1117 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1118 1118 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1119 1119 $ wait
1120 1120
1121 1121 $ hg -R share-destrace1 log -r tip
1122 1122 changeset: 2:e5bfe23c0b47
1123 1123 bookmark: bookA
1124 1124 tag: tip
1125 1125 user: test
1126 1126 date: Thu Jan 01 00:00:00 1970 +0000
1127 1127 summary: 1a
1128 1128
1129 1129
1130 1130 $ hg -R share-destrace2 log -r tip
1131 1131 changeset: 2:e5bfe23c0b47
1132 1132 bookmark: bookA
1133 1133 tag: tip
1134 1134 user: test
1135 1135 date: Thu Jan 01 00:00:00 1970 +0000
1136 1136 summary: 1a
1137 1137
1138 1138 One repo should be new, the other should be shared from the pool. We
1139 1139 don't care which is which, so we just make sure we always print the
1140 1140 one containing "new pooled" first, then the one containing "existing
1141 1141 pooled".
1142 1142
1143 1143 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1144 1144 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1145 1145 requesting all changes
1146 1146 adding changesets
1147 1147 adding manifests
1148 1148 adding file changes
1149 1149 added 3 changesets with 3 changes to 1 files
1150 1150 new changesets b5f04eac9d8f:e5bfe23c0b47
1151 1151 searching for changes
1152 1152 no changes found
1153 1153 adding remote bookmark bookA
1154 1154 updating working directory
1155 1155 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1156 1156
1157 1157 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1158 1158 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1159 1159 searching for changes
1160 1160 no changes found
1161 1161 adding remote bookmark bookA
1162 1162 updating working directory
1163 1163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1164 1164
1165 1165 SEC: check for unsafe ssh url (ui.ssh is pointed at a harmless dummy command below so no real ssh ever runs; the point of interest is the command line shown by --debug)
1166 1166
1167 1167 $ cat >> $HGRCPATH << EOF
1168 1168 > [ui]
1169 1169 > ssh = sh -c "read l; read l; read l"
1170 1170 > EOF
1171 1171
1172 1172 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1173 1173 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1174 1174 [255]
1175 1175 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1176 1176 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1177 1177 [255]
1178 1178 $ hg clone 'ssh://fakehost|touch%20owned/path'
1179 1179 abort: no suitable response from remote hg!
1180 1180 [255]
1181 1181 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1182 1182 abort: no suitable response from remote hg!
1183 1183 [255]
1184 1184
1185 1185 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1186 1186 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1187 1187 [255]
1188 1188
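The common trait of the URLs rejected as "potentially unsafe" above is that the user, host, or path component begins with a dash (possibly percent-encoded) and could therefore be handed to ssh as a command-line option; the ones that merely fail with "no suitable response" are syntactically harmless but unreachable. A rough pure-Python illustration of that distinction (not Mercurial's actual check):

    def looks_unsafe_for_ssh(user, host, path):
        # A leading '-' in any component could be parsed by ssh as an option.
        return any(part and part.startswith('-') for part in (user, host, path))

    assert looks_unsafe_for_ssh('-oProxyCommand=touch owned', 'example.com', '/nonexistent/path')
    assert looks_unsafe_for_ssh(None, '-oProxyCommand=touch${IFS}owned', '/path')
    assert not looks_unsafe_for_ssh(None, 'v-alid.example.com', '/')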
1189 1189 #if windows
1190 1190 $ hg clone "ssh://%26touch%20owned%20/" --debug
1191 1191 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1192 1192 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1193 1193 sending hello command
1194 1194 sending between command
1195 1195 abort: no suitable response from remote hg!
1196 1196 [255]
1197 1197 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1198 1198 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1199 1199 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1200 1200 sending hello command
1201 1201 sending between command
1202 1202 abort: no suitable response from remote hg!
1203 1203 [255]
1204 1204 #else
1205 1205 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1206 1206 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1207 1207 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1208 1208 sending hello command
1209 1209 sending between command
1210 1210 abort: no suitable response from remote hg!
1211 1211 [255]
1212 1212 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1213 1213 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1214 1214 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1215 1215 sending hello command
1216 1216 sending between command
1217 1217 abort: no suitable response from remote hg!
1218 1218 [255]
1219 1219 #endif
1220 1220
1221 1221 $ hg clone "ssh://v-alid.example.com/" --debug
1222 1222 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1223 1223 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1224 1224 sending hello command
1225 1225 sending between command
1226 1226 abort: no suitable response from remote hg!
1227 1227 [255]
1228 1228
1229 1229 We should not have created a file named owned - if it exists, the
1230 1230 attack succeeded.
1231 1231 $ if test -f owned; then echo 'you got owned'; fi
1232 1232
1233 1233 Cloning without fsmonitor enabled does not print a warning for small repos
1234 1234
1235 1235 $ hg clone a fsmonitor-default
1236 1236 updating to bookmark @ on branch stable
1237 1237 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1238 1238
1239 1239 Lower the warning threshold to simulate a large repo
1240 1240
1241 1241 $ cat >> $HGRCPATH << EOF
1242 1242 > [fsmonitor]
1243 1243 > warn_update_file_count = 2
1244 > warn_update_file_count_rust = 2
1244 1245 > EOF
1245 1246
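For context, the behaviour this snippet tunes is the fsmonitor extension's advice hint: when fsmonitor is not enabled but warn_when_unused is set, a working directory update that would create at least the configured number of files prints the warning seen below, and the _rust variant of the knob applies when Mercurial was built with Rust support. A rough sketch of that decision (illustrative only; the helper name and arguments are invented, only the config knobs come from the extension's documentation):

    def should_warn_about_fsmonitor(ui, files_to_create, rust_build, fsmonitor_enabled):
        # Illustrative sketch of the documented behaviour, not the extension's code.
        if fsmonitor_enabled or not ui.configbool(b'fsmonitor', b'warn_when_unused'):
            return False
        knob = b'warn_update_file_count_rust' if rust_build else b'warn_update_file_count'
        threshold = ui.configint(b'fsmonitor', knob)
        return threshold is not None and files_to_create >= threshold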
1246 1247 We should see a warning about no fsmonitor on supported platforms
1247 1248
1248 1249 #if linuxormacos no-fsmonitor
1249 1250 $ hg clone a nofsmonitor
1250 1251 updating to bookmark @ on branch stable
1251 1252 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1252 1253 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1253 1254 #else
1254 1255 $ hg clone a nofsmonitor
1255 1256 updating to bookmark @ on branch stable
1256 1257 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1257 1258 #endif
1258 1259
1259 1260 We should not see warning about fsmonitor when it is enabled
1260 1261
1261 1262 #if fsmonitor
1262 1263 $ hg clone a fsmonitor-enabled
1263 1264 updating to bookmark @ on branch stable
1264 1265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1265 1266 #endif
1266 1267
1267 1268 We can disable the fsmonitor warning
1268 1269
1269 1270 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1270 1271 updating to bookmark @ on branch stable
1271 1272 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1272 1273
1273 1274 fsmonitor loaded but disabled in the config should still print the warning
1274 1275
1275 1276 #if linuxormacos fsmonitor
1276 1277 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1277 1278 updating to bookmark @ on branch stable
1278 1279 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1279 1280 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 1281 #endif
1281 1282
1282 1283 Warning not printed if working directory isn't empty
1283 1284
1284 1285 $ hg -q clone a fsmonitor-update
1285 1286 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1286 1287 $ cd fsmonitor-update
1287 1288 $ hg up acb14030fe0a
1288 1289 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1289 1290 (leaving bookmark @)
1290 1291 $ hg up cf0fe1914066
1291 1292 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1292 1293
1293 1294 `hg update` from null revision also prints
1294 1295
1295 1296 $ hg up null
1296 1297 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1297 1298
1298 1299 #if linuxormacos no-fsmonitor
1299 1300 $ hg up cf0fe1914066
1300 1301 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1301 1302 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1302 1303 #else
1303 1304 $ hg up cf0fe1914066
1304 1305 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1305 1306 #endif
1306 1307
1307 1308 $ cd ..
1308 1309