##// END OF EJS Templates
progress: create helper class for incrementing progress...
Martin von Zweigbergk -
r38364:bec1212e default
parent child Browse files
Show More
@@ -1,2247 +1,2232 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullhex,
22 22 nullid,
23 23 nullrev,
24 24 )
25 25 from .thirdparty import (
26 26 attr,
27 27 )
28 28 from . import (
29 29 copies,
30 30 error,
31 31 filemerge,
32 32 match as matchmod,
33 33 obsutil,
34 34 pycompat,
35 35 scmutil,
36 36 subrepoutil,
37 37 util,
38 38 worker,
39 39 )
40 40
# Short local aliases for the struct (un)packers used by the v2
# merge-state record format (see _readrecordsv2/_writerecordsv2).
_pack = struct.pack
_unpack = struct.unpack
43 43
44 44 def _droponode(data):
45 45 # used for compatibility for v1
46 46 bits = data.split('\0')
47 47 bits = bits[:-2] + bits[-1:]
48 48 return '\0'.join(bits)
49 49
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

# Merge driver run states, stored in the 'm' record alongside the driver
# name (see ``mergestate`` docs for their meaning).
MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

# Per-file resolution states: the first field of each mergestate._state
# entry (see ``mergestate`` docs).
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

# Action codes used as keys/values in the actions dicts consumed by
# applyupdates/recordupdates and friends.
ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'
90 90
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    # On-disk locations (relative to .hg/) of the v1 and v2 state files.
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False  # True when in-memory state needs commit()
        self._labels = None  # optional conflict-marker labels (list or None)

    def reset(self, node=None, other=None, labels=None):
        # Discard all in-memory state and any on-disk backups, optionally
        # seeding the "local"/"other" nodes for a new merge.
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop memoized @propertycache values that depend on _local/_other
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        else:
            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
        # remove stale backups of local file versions written by add()
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop memoized @propertycache values that depend on _local/_other
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                # record is '<drivername>\0<runstate>'
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
                           RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
                # per-file entry: '<filename>\0<field>\0<field>...'
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                # record is '<filename>\0<key>\0<value>\0<key>\0<value>...'
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # unknown mandatory (uppercase) record: collect and abort below
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        # Return True when every v1 record is consistent with v2 content.
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            # for/else: loop completed without finding a contradiction
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are merged files;
                # l[:-1] strips the trailing newline
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            # missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off:off + 1]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    # unwrap the 't' workaround record (see docstring)
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        # Serialize in-memory state into a list of (TYPE, data) records.
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append((RECORD_MERGE_DRIVER_MERGE,
                                '\0'.join([filename] + v)))
            elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
                          MERGE_RECORD_RESOLVED_PATH):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append((RECORD_PATH_CONFLICT,
                                '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append((RECORD_CHANGEDELETE_CONFLICT,
                                '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED,
                                '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append((RECORD_FILE_VALUES,
                            '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        # _makerecords() always emits the local node first
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, 'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            hash = nullhex
        else:
            # the hash is the sha1 of the file's *path*, not its data; it is
            # only used as a stable backup filename under .hg/merge/
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path:    the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # returns the resolution state (first field) of the file's entry
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        # set the resolution state for `dfile` and remember to persist it
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in (MERGE_RECORD_UNRESOLVED,
                            MERGE_RECORD_UNRESOLVED_PATH):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
                yield f

    def extras(self, filename):
        # per-file extra values ('f' records); created on first access
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in (MERGE_RECORD_RESOLVED,
                           MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        # entry layout matches what add() stores:
        # [state, backup hash, local path, ancestor path, ancestor node,
        #  other path, other node, local flags]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        # r is None when there was no real conflict, 0 on a successful
        # merge (see _resolve); anything else counts as neither
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == ACTION_REMOVE:
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {
            ACTION_REMOVE: [],
            ACTION_FORGET: [],
            ACTION_ADD: [],
            ACTION_ADD_MODIFIED: [],
            ACTION_GET: [],
        }
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_REMOVE

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_ADD

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_GET
697 697
698 698 def _getcheckunknownconfig(repo, section, name):
699 699 config = repo.ui.config(section, name)
700 700 valid = ['abort', 'ignore', 'warn']
701 701 if config not in valid:
702 702 validstr = ', '.join(["'" + v + "'" for v in valid])
703 703 raise error.ConfigError(_("%s.%s not valid "
704 704 "('%s' is none of %s)")
705 705 % (section, name, config, validstr))
706 706 return config
707 707
708 708 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
709 709 if wctx.isinmemory():
710 710 # Nothing to do in IMM because nothing in the "working copy" can be an
711 711 # unknown file.
712 712 #
713 713 # Note that we should bail out here, not in ``_checkunknownfiles()``,
714 714 # because that function does other useful work.
715 715 return False
716 716
717 717 if f2 is None:
718 718 f2 = f
719 719 return (repo.wvfs.audit.check(f)
720 720 and repo.wvfs.isfileorlink(f)
721 721 and repo.dirstate.normalize(f) not in repo.dirstate
722 722 and mctx[f2].cmp(wctx[f]))
723 723
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file.  If any path prefix of the file exists as a file or link,
    then it conflicts.  If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good.  This prevents repeated checking of
        # dirs.  It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent.  This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the shortest conflicting path, or None (possibly via an
        # implicit bare ``return``) when no conflict is found.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
775 775
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates ``actions`` in place (CREATED/CREATED_MERGE entries may be turned
    into GET or MERGE actions) and may raise error.Abort when configuration
    says conflicts with untracked files are fatal.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), 'remote created')
                elif mergeforce or config == 'abort':
                    actions[f] = (ACTION_MERGE, (f, f, None, False, anc),
                                  'remote differs from untracked local')
                elif config == 'abort':
                    # NOTE(review): this branch is unreachable -- the branch
                    # above already matches whenever config == 'abort'.  This
                    # is the behavior described by table note (1) above.
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), 'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == ACTION_CREATED:
            # back up the untracked file before overwriting it when it (or a
            # path prefix of it) was found to conflict
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
874 874
875 875 def _forgetremoved(wctx, mctx, branchmerge):
876 876 """
877 877 Forget removed files
878 878
879 879 If we're jumping between revisions (as opposed to merging), and if
880 880 neither the working directory nor the target rev has the file,
881 881 then we need to remove it from the dirstate, to prevent the
882 882 dirstate from listing the file when it is no longer in the
883 883 manifest.
884 884
885 885 If we're merging, and the other revision has removed a file
886 886 that is not present in the working directory, we need to mark it
887 887 as removed.
888 888 """
889 889
890 890 actions = {}
891 891 m = ACTION_FORGET
892 892 if branchmerge:
893 893 m = ACTION_REMOVE
894 894 for f in wctx.deleted():
895 895 if f not in mctx:
896 896 actions[f] = m, None, "forget deleted"
897 897
898 898 if not branchmerge:
899 899 for f in wctx.removed():
900 900 if f not in mctx:
901 901 actions[f] = ACTION_FORGET, None, "forget removed"
902 902
903 903 return actions
904 904
905 905 def _checkcollision(repo, wmf, actions):
906 906 """
907 907 Check for case-folding collisions.
908 908 """
909 909
910 910 # If the repo is narrowed, filter out files outside the narrowspec.
911 911 narrowmatch = repo.narrowmatch()
912 912 if not narrowmatch.always():
913 913 wmf = wmf.matches(narrowmatch)
914 914 if actions:
915 915 narrowactions = {}
916 916 for m, actionsfortype in actions.iteritems():
917 917 narrowactions[m] = []
918 918 for (f, args, msg) in actionsfortype:
919 919 if narrowmatch(f):
920 920 narrowactions[m].append((f, args, msg))
921 921 actions = narrowactions
922 922
923 923 # build provisional merged manifest up
924 924 pmmf = set(wmf)
925 925
926 926 if actions:
927 927 # KEEP and EXEC are no-op
928 928 for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET,
929 929 ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED):
930 930 for f, args, msg in actions[m]:
931 931 pmmf.add(f)
932 932 for f, args, msg in actions[ACTION_REMOVE]:
933 933 pmmf.discard(f)
934 934 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
935 935 f2, flags = args
936 936 pmmf.discard(f2)
937 937 pmmf.add(f)
938 938 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
939 939 pmmf.add(f)
940 940 for f, args, msg in actions[ACTION_MERGE]:
941 941 f1, f2, fa, move, anc = args
942 942 if move:
943 943 pmmf.discard(f1)
944 944 pmmf.add(f)
945 945
946 946 # check case-folding collision in provisional merged manifest
947 947 foldmap = {}
948 948 for f in pmmf:
949 949 fold = util.normcase(f)
950 950 if fold in foldmap:
951 951 raise error.Abort(_("case-folding collision between %s and %s")
952 952 % (f, foldmap[fold]))
953 953 foldmap[fold] = f
954 954
955 955 # check case-folding of directories
956 956 foldprefix = unfoldprefix = lastfull = ''
957 957 for fold, f in sorted(foldmap.items()):
958 958 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
959 959 # the folded prefix matches but actual casing is different
960 960 raise error.Abort(_("case-folding collision between "
961 961 "%s and directory of %s") % (lastfull, f))
962 962 foldprefix = fold + '/'
963 963 unfoldprefix = f + '/'
964 964 lastfull = f
965 965
966 966 def driverpreprocess(repo, ms, wctx, labels=None):
967 967 """run the preprocess step of the merge driver, if any
968 968
969 969 This is currently not implemented -- it's an extension point."""
970 970 return True
971 971
972 972 def driverconclude(repo, ms, wctx, labels=None):
973 973 """run the conclude step of the merge driver, if any
974 974
975 975 This is currently not implemented -- it's an extension point."""
976 976 return True
977 977
978 978 def _filesindirs(repo, manifest, dirs):
979 979 """
980 980 Generator that yields pairs of all the files in the manifest that are found
981 981 inside the directories listed in dirs, and which directory they are found
982 982 in.
983 983 """
984 984 for f in manifest:
985 985 for p in util.finddirs(f):
986 986 if p in dirs:
987 987 yield f, p
988 988 break
989 989
990 990 def checkpathconflicts(repo, wctx, mctx, actions):
991 991 """
992 992 Check if any actions introduce path conflicts in the repository, updating
993 993 actions to record or handle the path conflict accordingly.
994 994 """
995 995 mf = wctx.manifest()
996 996
997 997 # The set of local files that conflict with a remote directory.
998 998 localconflicts = set()
999 999
1000 1000 # The set of directories that conflict with a remote file, and so may cause
1001 1001 # conflicts if they still contain any files after the merge.
1002 1002 remoteconflicts = set()
1003 1003
1004 1004 # The set of directories that appear as both a file and a directory in the
1005 1005 # remote manifest. These indicate an invalid remote manifest, which
1006 1006 # can't be updated to cleanly.
1007 1007 invalidconflicts = set()
1008 1008
1009 1009 # The set of directories that contain files that are being created.
1010 1010 createdfiledirs = set()
1011 1011
1012 1012 # The set of files deleted by all the actions.
1013 1013 deletedfiles = set()
1014 1014
1015 1015 for f, (m, args, msg) in actions.items():
1016 1016 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE,
1017 1017 ACTION_CREATED_MERGE):
1018 1018 # This action may create a new local file.
1019 1019 createdfiledirs.update(util.finddirs(f))
1020 1020 if mf.hasdir(f):
1021 1021 # The file aliases a local directory. This might be ok if all
1022 1022 # the files in the local directory are being deleted. This
1023 1023 # will be checked once we know what all the deleted files are.
1024 1024 remoteconflicts.add(f)
1025 1025 # Track the names of all deleted files.
1026 1026 if m == ACTION_REMOVE:
1027 1027 deletedfiles.add(f)
1028 1028 if m == ACTION_MERGE:
1029 1029 f1, f2, fa, move, anc = args
1030 1030 if move:
1031 1031 deletedfiles.add(f1)
1032 1032 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1033 1033 f2, flags = args
1034 1034 deletedfiles.add(f2)
1035 1035
1036 1036 # Check all directories that contain created files for path conflicts.
1037 1037 for p in createdfiledirs:
1038 1038 if p in mf:
1039 1039 if p in mctx:
1040 1040 # A file is in a directory which aliases both a local
1041 1041 # and a remote file. This is an internal inconsistency
1042 1042 # within the remote manifest.
1043 1043 invalidconflicts.add(p)
1044 1044 else:
1045 1045 # A file is in a directory which aliases a local file.
1046 1046 # We will need to rename the local file.
1047 1047 localconflicts.add(p)
1048 1048 if p in actions and actions[p][0] in (ACTION_CREATED,
1049 1049 ACTION_DELETED_CHANGED,
1050 1050 ACTION_MERGE,
1051 1051 ACTION_CREATED_MERGE):
1052 1052 # The file is in a directory which aliases a remote file.
1053 1053 # This is an internal inconsistency within the remote
1054 1054 # manifest.
1055 1055 invalidconflicts.add(p)
1056 1056
1057 1057 # Rename all local conflicting files that have not been deleted.
1058 1058 for p in localconflicts:
1059 1059 if p not in deletedfiles:
1060 1060 ctxname = bytes(wctx).rstrip('+')
1061 1061 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1062 1062 actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,),
1063 1063 'local path conflict')
1064 1064 actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'),
1065 1065 'path conflict')
1066 1066
1067 1067 if remoteconflicts:
1068 1068 # Check if all files in the conflicting directories have been removed.
1069 1069 ctxname = bytes(mctx).rstrip('+')
1070 1070 for f, p in _filesindirs(repo, mf, remoteconflicts):
1071 1071 if f not in deletedfiles:
1072 1072 m, args, msg = actions[p]
1073 1073 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1074 1074 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1075 1075 # Action was merge, just update target.
1076 1076 actions[pnew] = (m, args, msg)
1077 1077 else:
1078 1078 # Action was create, change to renamed get action.
1079 1079 fl = args[0]
1080 1080 actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl),
1081 1081 'remote path conflict')
1082 1082 actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE),
1083 1083 'path conflict')
1084 1084 remoteconflicts.remove(p)
1085 1085 break
1086 1086
1087 1087 if invalidconflicts:
1088 1088 for p in invalidconflicts:
1089 1089 repo.ui.warn(_("%s: is both a file and a directory\n") % p)
1090 1090 raise error.Abort(_("destination manifest contains path conflicts"))
1091 1091
1092 1092 def _filternarrowactions(narrowmatch, branchmerge, actions):
1093 1093 """
1094 1094 Filters out actions that can ignored because the repo is narrowed.
1095 1095
1096 1096 Raise an exception if the merge cannot be completed because the repo is
1097 1097 narrowed.
1098 1098 """
1099 1099 nooptypes = set(['k']) # TODO: handle with nonconflicttypes
1100 1100 nonconflicttypes = set('a am c cm f g r e'.split())
1101 1101 # We mutate the items in the dict during iteration, so iterate
1102 1102 # over a copy.
1103 1103 for f, action in list(actions.items()):
1104 1104 if narrowmatch(f):
1105 1105 pass
1106 1106 elif not branchmerge:
1107 1107 del actions[f] # just updating, ignore changes outside clone
1108 1108 elif action[0] in nooptypes:
1109 1109 del actions[f] # merge does not affect file
1110 1110 elif action[0] in nonconflicttypes:
1111 1111 raise error.Abort(_('merge affects file \'%s\' outside narrow, '
1112 1112 'which is not yet supported') % f,
1113 1113 hint=_('merging in the other direction '
1114 1114 'may work'))
1115 1115 else:
1116 1116 raise error.Abort(_('conflict in file \'%s\' is outside '
1117 1117 'narrow clone') % f)
1118 1118
1119 1119 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1120 1120 acceptremote, followcopies, forcefulldiff=False):
1121 1121 """
1122 1122 Merge wctx and p2 with ancestor pa and generate merge action list
1123 1123
1124 1124 branchmerge and force are as passed in to update
1125 1125 matcher = matcher to filter file lists
1126 1126 acceptremote = accept the incoming changes without prompting
1127 1127 """
1128 1128 if matcher is not None and matcher.always():
1129 1129 matcher = None
1130 1130
1131 1131 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1132 1132
1133 1133 # manifests fetched in order are going to be faster, so prime the caches
1134 1134 [x.manifest() for x in
1135 1135 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1136 1136
1137 1137 if followcopies:
1138 1138 ret = copies.mergecopies(repo, wctx, p2, pa)
1139 1139 copy, movewithdir, diverge, renamedelete, dirmove = ret
1140 1140
1141 1141 boolbm = pycompat.bytestr(bool(branchmerge))
1142 1142 boolf = pycompat.bytestr(bool(force))
1143 1143 boolm = pycompat.bytestr(bool(matcher))
1144 1144 repo.ui.note(_("resolving manifests\n"))
1145 1145 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1146 1146 % (boolbm, boolf, boolm))
1147 1147 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1148 1148
1149 1149 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1150 1150 copied = set(copy.values())
1151 1151 copied.update(movewithdir.values())
1152 1152
1153 1153 if '.hgsubstate' in m1:
1154 1154 # check whether sub state is modified
1155 1155 if any(wctx.sub(s).dirty() for s in wctx.substate):
1156 1156 m1['.hgsubstate'] = modifiednodeid
1157 1157
1158 1158 # Don't use m2-vs-ma optimization if:
1159 1159 # - ma is the same as m1 or m2, which we're just going to diff again later
1160 1160 # - The caller specifically asks for a full diff, which is useful during bid
1161 1161 # merge.
1162 1162 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1163 1163 # Identify which files are relevant to the merge, so we can limit the
1164 1164 # total m1-vs-m2 diff to just those files. This has significant
1165 1165 # performance benefits in large repositories.
1166 1166 relevantfiles = set(ma.diff(m2).keys())
1167 1167
1168 1168 # For copied and moved files, we need to add the source file too.
1169 1169 for copykey, copyvalue in copy.iteritems():
1170 1170 if copyvalue in relevantfiles:
1171 1171 relevantfiles.add(copykey)
1172 1172 for movedirkey in movewithdir:
1173 1173 relevantfiles.add(movedirkey)
1174 1174 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1175 1175 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1176 1176
1177 1177 diff = m1.diff(m2, match=matcher)
1178 1178
1179 1179 if matcher is None:
1180 1180 matcher = matchmod.always('', '')
1181 1181
1182 1182 actions = {}
1183 1183 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1184 1184 if n1 and n2: # file exists on both local and remote side
1185 1185 if f not in ma:
1186 1186 fa = copy.get(f, None)
1187 1187 if fa is not None:
1188 1188 actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
1189 1189 'both renamed from %s' % fa)
1190 1190 else:
1191 1191 actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
1192 1192 'both created')
1193 1193 else:
1194 1194 a = ma[f]
1195 1195 fla = ma.flags(f)
1196 1196 nol = 'l' not in fl1 + fl2 + fla
1197 1197 if n2 == a and fl2 == fla:
1198 1198 actions[f] = (ACTION_KEEP, (), 'remote unchanged')
1199 1199 elif n1 == a and fl1 == fla: # local unchanged - use remote
1200 1200 if n1 == n2: # optimization: keep local content
1201 1201 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1202 1202 else:
1203 1203 actions[f] = (ACTION_GET, (fl2, False),
1204 1204 'remote is newer')
1205 1205 elif nol and n2 == a: # remote only changed 'x'
1206 1206 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1207 1207 elif nol and n1 == a: # local only changed 'x'
1208 1208 actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
1209 1209 else: # both changed something
1210 1210 actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
1211 1211 'versions differ')
1212 1212 elif n1: # file exists only on local side
1213 1213 if f in copied:
1214 1214 pass # we'll deal with it on m2 side
1215 1215 elif f in movewithdir: # directory rename, move local
1216 1216 f2 = movewithdir[f]
1217 1217 if f2 in m2:
1218 1218 actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
1219 1219 'remote directory rename, both created')
1220 1220 else:
1221 1221 actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
1222 1222 'remote directory rename - move from %s' % f)
1223 1223 elif f in copy:
1224 1224 f2 = copy[f]
1225 1225 actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
1226 1226 'local copied/moved from %s' % f2)
1227 1227 elif f in ma: # clean, a different, no remote
1228 1228 if n1 != ma[f]:
1229 1229 if acceptremote:
1230 1230 actions[f] = (ACTION_REMOVE, None, 'remote delete')
1231 1231 else:
1232 1232 actions[f] = (ACTION_CHANGED_DELETED,
1233 1233 (f, None, f, False, pa.node()),
1234 1234 'prompt changed/deleted')
1235 1235 elif n1 == addednodeid:
1236 1236 # This extra 'a' is added by working copy manifest to mark
1237 1237 # the file as locally added. We should forget it instead of
1238 1238 # deleting it.
1239 1239 actions[f] = (ACTION_FORGET, None, 'remote deleted')
1240 1240 else:
1241 1241 actions[f] = (ACTION_REMOVE, None, 'other deleted')
1242 1242 elif n2: # file exists only on remote side
1243 1243 if f in copied:
1244 1244 pass # we'll deal with it on m1 side
1245 1245 elif f in movewithdir:
1246 1246 f2 = movewithdir[f]
1247 1247 if f2 in m1:
1248 1248 actions[f2] = (ACTION_MERGE,
1249 1249 (f2, f, None, False, pa.node()),
1250 1250 'local directory rename, both created')
1251 1251 else:
1252 1252 actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2),
1253 1253 'local directory rename - get from %s' % f)
1254 1254 elif f in copy:
1255 1255 f2 = copy[f]
1256 1256 if f2 in m2:
1257 1257 actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()),
1258 1258 'remote copied from %s' % f2)
1259 1259 else:
1260 1260 actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()),
1261 1261 'remote moved from %s' % f2)
1262 1262 elif f not in ma:
1263 1263 # local unknown, remote created: the logic is described by the
1264 1264 # following table:
1265 1265 #
1266 1266 # force branchmerge different | action
1267 1267 # n * * | create
1268 1268 # y n * | create
1269 1269 # y y n | create
1270 1270 # y y y | merge
1271 1271 #
1272 1272 # Checking whether the files are different is expensive, so we
1273 1273 # don't do that when we can avoid it.
1274 1274 if not force:
1275 1275 actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
1276 1276 elif not branchmerge:
1277 1277 actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
1278 1278 else:
1279 1279 actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()),
1280 1280 'remote created, get or merge')
1281 1281 elif n2 != ma[f]:
1282 1282 df = None
1283 1283 for d in dirmove:
1284 1284 if f.startswith(d):
1285 1285 # new file added in a directory that was moved
1286 1286 df = dirmove[d] + f[len(d):]
1287 1287 break
1288 1288 if df is not None and df in m1:
1289 1289 actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()),
1290 1290 'local directory rename - respect move '
1291 1291 'from %s' % f)
1292 1292 elif acceptremote:
1293 1293 actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating')
1294 1294 else:
1295 1295 actions[f] = (ACTION_DELETED_CHANGED,
1296 1296 (None, f, f, False, pa.node()),
1297 1297 'prompt deleted/changed')
1298 1298
1299 1299 if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1300 1300 # If we are merging, look for path conflicts.
1301 1301 checkpathconflicts(repo, wctx, p2, actions)
1302 1302
1303 1303 narrowmatch = repo.narrowmatch()
1304 1304 if not narrowmatch.always():
1305 1305 # Updates "actions" in place
1306 1306 _filternarrowactions(narrowmatch, branchmerge, actions)
1307 1307
1308 1308 return actions, diverge, renamedelete
1309 1309
1310 1310 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1311 1311 """Resolves false conflicts where the nodeid changed but the content
1312 1312 remained the same."""
1313 1313 # We force a copy of actions.items() because we're going to mutate
1314 1314 # actions as we resolve trivial conflicts.
1315 1315 for f, (m, args, msg) in list(actions.items()):
1316 1316 if (m == ACTION_CHANGED_DELETED and f in ancestor
1317 1317 and not wctx[f].cmp(ancestor[f])):
1318 1318 # local did change but ended up with same content
1319 1319 actions[f] = ACTION_REMOVE, None, 'prompt same'
1320 1320 elif (m == ACTION_DELETED_CHANGED and f in ancestor
1321 1321 and not mctx[f].cmp(ancestor[f])):
1322 1322 # remote did change but ended up with same content
1323 1323 del actions[f] # don't get = keep local deleted
1324 1324
1325 1325 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1326 1326 acceptremote, followcopies, matcher=None,
1327 1327 mergeforce=False):
1328 1328 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1329 1329 # Avoid cycle.
1330 1330 from . import sparse
1331 1331
1332 1332 if len(ancestors) == 1: # default
1333 1333 actions, diverge, renamedelete = manifestmerge(
1334 1334 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1335 1335 acceptremote, followcopies)
1336 1336 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1337 1337
1338 1338 else: # only when merge.preferancestor=* - the default
1339 1339 repo.ui.note(
1340 1340 _("note: merging %s and %s using bids from ancestors %s\n") %
1341 1341 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1342 1342 for anc in ancestors)))
1343 1343
1344 1344 # Call for bids
1345 1345 fbids = {} # mapping filename to bids (action method to list af actions)
1346 1346 diverge, renamedelete = None, None
1347 1347 for ancestor in ancestors:
1348 1348 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1349 1349 actions, diverge1, renamedelete1 = manifestmerge(
1350 1350 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1351 1351 acceptremote, followcopies, forcefulldiff=True)
1352 1352 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1353 1353
1354 1354 # Track the shortest set of warning on the theory that bid
1355 1355 # merge will correctly incorporate more information
1356 1356 if diverge is None or len(diverge1) < len(diverge):
1357 1357 diverge = diverge1
1358 1358 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1359 1359 renamedelete = renamedelete1
1360 1360
1361 1361 for f, a in sorted(actions.iteritems()):
1362 1362 m, args, msg = a
1363 1363 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1364 1364 if f in fbids:
1365 1365 d = fbids[f]
1366 1366 if m in d:
1367 1367 d[m].append(a)
1368 1368 else:
1369 1369 d[m] = [a]
1370 1370 else:
1371 1371 fbids[f] = {m: [a]}
1372 1372
1373 1373 # Pick the best bid for each file
1374 1374 repo.ui.note(_('\nauction for merging merge bids\n'))
1375 1375 actions = {}
1376 1376 dms = [] # filenames that have dm actions
1377 1377 for f, bids in sorted(fbids.items()):
1378 1378 # bids is a mapping from action method to list af actions
1379 1379 # Consensus?
1380 1380 if len(bids) == 1: # all bids are the same kind of method
1381 1381 m, l = list(bids.items())[0]
1382 1382 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1383 1383 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1384 1384 actions[f] = l[0]
1385 1385 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1386 1386 dms.append(f)
1387 1387 continue
1388 1388 # If keep is an option, just do it.
1389 1389 if ACTION_KEEP in bids:
1390 1390 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1391 1391 actions[f] = bids[ACTION_KEEP][0]
1392 1392 continue
1393 1393 # If there are gets and they all agree [how could they not?], do it.
1394 1394 if ACTION_GET in bids:
1395 1395 ga0 = bids[ACTION_GET][0]
1396 1396 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1397 1397 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1398 1398 actions[f] = ga0
1399 1399 continue
1400 1400 # TODO: Consider other simple actions such as mode changes
1401 1401 # Handle inefficient democrazy.
1402 1402 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1403 1403 for m, l in sorted(bids.items()):
1404 1404 for _f, args, msg in l:
1405 1405 repo.ui.note(' %s -> %s\n' % (msg, m))
1406 1406 # Pick random action. TODO: Instead, prompt user when resolving
1407 1407 m, l = list(bids.items())[0]
1408 1408 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1409 1409 (f, m))
1410 1410 actions[f] = l[0]
1411 1411 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1412 1412 dms.append(f)
1413 1413 continue
1414 1414 # Work around 'dm' that can cause multiple actions for the same file
1415 1415 for f in dms:
1416 1416 dm, (f0, flags), msg = actions[f]
1417 1417 assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm
1418 1418 if f0 in actions and actions[f0][0] == ACTION_REMOVE:
1419 1419 # We have one bid for removing a file and another for moving it.
1420 1420 # These two could be merged as first move and then delete ...
1421 1421 # but instead drop moving and just delete.
1422 1422 del actions[f]
1423 1423 repo.ui.note(_('end of auction\n\n'))
1424 1424
1425 1425 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1426 1426
1427 1427 if wctx.rev() is None:
1428 1428 fractions = _forgetremoved(wctx, mctx, branchmerge)
1429 1429 actions.update(fractions)
1430 1430
1431 1431 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1432 1432 actions)
1433 1433
1434 1434 return prunedactions, diverge, renamedelete
1435 1435
1436 1436 def _getcwd():
1437 1437 try:
1438 1438 return pycompat.getcwd()
1439 1439 except OSError as err:
1440 1440 if err.errno == errno.ENOENT:
1441 1441 return None
1442 1442 raise
1443 1443
1444 1444 def batchremove(repo, wctx, actions):
1445 1445 """apply removes to the working directory
1446 1446
1447 1447 yields tuples for progress updates
1448 1448 """
1449 1449 verbose = repo.ui.verbose
1450 1450 cwd = _getcwd()
1451 1451 i = 0
1452 1452 for f, args, msg in actions:
1453 1453 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1454 1454 if verbose:
1455 1455 repo.ui.note(_("removing %s\n") % f)
1456 1456 wctx[f].audit()
1457 1457 try:
1458 1458 wctx[f].remove(ignoremissing=True)
1459 1459 except OSError as inst:
1460 1460 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1461 1461 (f, inst.strerror))
1462 1462 if i == 100:
1463 1463 yield i, f
1464 1464 i = 0
1465 1465 i += 1
1466 1466 if i > 0:
1467 1467 yield i, f
1468 1468
1469 1469 if cwd and not _getcwd():
1470 1470 # cwd was removed in the course of removing files; print a helpful
1471 1471 # warning.
1472 1472 repo.ui.warn(_("current directory was removed\n"
1473 1473 "(consider changing to repo root: %s)\n") % repo.root)
1474 1474
1475 1475 def batchget(repo, mctx, wctx, actions):
1476 1476 """apply gets to the working directory
1477 1477
1478 1478 mctx is the context to get from
1479 1479
1480 1480 yields tuples for progress updates
1481 1481 """
1482 1482 verbose = repo.ui.verbose
1483 1483 fctx = mctx.filectx
1484 1484 ui = repo.ui
1485 1485 i = 0
1486 1486 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1487 1487 for f, (flags, backup), msg in actions:
1488 1488 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1489 1489 if verbose:
1490 1490 repo.ui.note(_("getting %s\n") % f)
1491 1491
1492 1492 if backup:
1493 1493 # If a file or directory exists with the same name, back that
1494 1494 # up. Otherwise, look to see if there is a file that conflicts
1495 1495 # with a directory this file is in, and if so, back that up.
1496 1496 absf = repo.wjoin(f)
1497 1497 if not repo.wvfs.lexists(f):
1498 1498 for p in util.finddirs(f):
1499 1499 if repo.wvfs.isfileorlink(p):
1500 1500 absf = repo.wjoin(p)
1501 1501 break
1502 1502 orig = scmutil.origpath(ui, repo, absf)
1503 1503 if repo.wvfs.lexists(absf):
1504 1504 util.rename(absf, orig)
1505 1505 wctx[f].clearunknown()
1506 1506 atomictemp = ui.configbool("experimental", "update.atomic-file")
1507 1507 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1508 1508 atomictemp=atomictemp)
1509 1509 if i == 100:
1510 1510 yield i, f
1511 1511 i = 0
1512 1512 i += 1
1513 1513 if i > 0:
1514 1514 yield i, f
1515 1515
1516 1516 def _prefetchfiles(repo, ctx, actions):
1517 1517 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1518 1518 of merge actions. ``ctx`` is the context being merged in."""
1519 1519
1520 1520 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1521 1521 # don't touch the context to be merged in. 'cd' is skipped, because
1522 1522 # changed/deleted never resolves to something from the remote side.
1523 1523 oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
1524 1524 ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
1525 1525 prefetch = scmutil.prefetchfiles
1526 1526 matchfiles = scmutil.matchfiles
1527 1527 prefetch(repo, [ctx.rev()],
1528 1528 matchfiles(repo,
1529 1529 [f for sublist in oplist for f, args, msg in sublist]))
1530 1530
1531 1531 @attr.s(frozen=True)
1532 1532 class updateresult(object):
1533 1533 updatedcount = attr.ib()
1534 1534 mergedcount = attr.ib()
1535 1535 removedcount = attr.ib()
1536 1536 unresolvedcount = attr.ib()
1537 1537
1538 1538 def isempty(self):
1539 1539 return (not self.updatedcount and not self.mergedcount
1540 1540 and not self.removedcount and not self.unresolvedcount)
1541 1541
1542 1542 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1543 1543 """apply the merge action list to the working directory
1544 1544
1545 1545 wctx is the working copy context
1546 1546 mctx is the context to be merged into the working copy
1547 1547
1548 1548 Return a tuple of counts (updated, merged, removed, unresolved) that
1549 1549 describes how many files were affected by the update.
1550 1550 """
1551 1551
1552 1552 _prefetchfiles(repo, mctx, actions)
1553 1553
1554 1554 updated, merged, removed = 0, 0, 0
1555 1555 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1556 1556 moves = []
1557 1557 for m, l in actions.items():
1558 1558 l.sort()
1559 1559
1560 1560 # 'cd' and 'dc' actions are treated like other merge conflicts
1561 1561 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1562 1562 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1563 1563 mergeactions.extend(actions[ACTION_MERGE])
1564 1564 for f, args, msg in mergeactions:
1565 1565 f1, f2, fa, move, anc = args
1566 1566 if f == '.hgsubstate': # merged internally
1567 1567 continue
1568 1568 if f1 is None:
1569 1569 fcl = filemerge.absentfilectx(wctx, fa)
1570 1570 else:
1571 1571 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1572 1572 fcl = wctx[f1]
1573 1573 if f2 is None:
1574 1574 fco = filemerge.absentfilectx(mctx, fa)
1575 1575 else:
1576 1576 fco = mctx[f2]
1577 1577 actx = repo[anc]
1578 1578 if fa in actx:
1579 1579 fca = actx[fa]
1580 1580 else:
1581 1581 # TODO: move to absentfilectx
1582 1582 fca = repo.filectx(f1, fileid=nullrev)
1583 1583 ms.add(fcl, fco, fca, f)
1584 1584 if f1 != f and move:
1585 1585 moves.append(f1)
1586 1586
1587 _updating = _('updating')
1588 _files = _('files')
1589 progress = repo.ui.progress
1590
1591 1587 # remove renamed files after safely stored
1592 1588 for f in moves:
1593 1589 if wctx[f].lexists():
1594 1590 repo.ui.debug("removing %s\n" % f)
1595 1591 wctx[f].audit()
1596 1592 wctx[f].remove()
1597 1593
1598 1594 numupdates = sum(len(l) for m, l in actions.items()
1599 1595 if m != ACTION_KEEP)
1600 z = 0
1596 progress = repo.ui.makeprogress(_('updating'), unit=_('files'),
1597 total=numupdates)
1601 1598
1602 1599 if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
1603 1600 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1604 1601
1605 1602 # record path conflicts
1606 1603 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1607 1604 f1, fo = args
1608 1605 s = repo.ui.status
1609 1606 s(_("%s: path conflict - a file or link has the same name as a "
1610 1607 "directory\n") % f)
1611 1608 if fo == 'l':
1612 1609 s(_("the local file has been renamed to %s\n") % f1)
1613 1610 else:
1614 1611 s(_("the remote file has been renamed to %s\n") % f1)
1615 1612 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1616 1613 ms.addpath(f, f1, fo)
1617 z += 1
1618 progress(_updating, z, item=f, total=numupdates, unit=_files)
1614 progress.increment(item=f)
1619 1615
1620 1616 # When merging in-memory, we can't support worker processes, so set the
1621 1617 # per-item cost at 0 in that case.
1622 1618 cost = 0 if wctx.isinmemory() else 0.001
1623 1619
1624 1620 # remove in parallel (must come before resolving path conflicts and getting)
1625 1621 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1626 1622 actions[ACTION_REMOVE])
1627 1623 for i, item in prog:
1628 z += i
1629 progress(_updating, z, item=item, total=numupdates, unit=_files)
1624 progress.increment(step=i, item=item)
1630 1625 removed = len(actions[ACTION_REMOVE])
1631 1626
1632 1627 # resolve path conflicts (must come before getting)
1633 1628 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1634 1629 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1635 1630 f0, = args
1636 1631 if wctx[f0].lexists():
1637 1632 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1638 1633 wctx[f].audit()
1639 1634 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1640 1635 wctx[f0].remove()
1641 z += 1
1642 progress(_updating, z, item=f, total=numupdates, unit=_files)
1636 progress.increment(item=f)
1643 1637
1644 1638 # get in parallel
1645 1639 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1646 1640 actions[ACTION_GET])
1647 1641 for i, item in prog:
1648 z += i
1649 progress(_updating, z, item=item, total=numupdates, unit=_files)
1642 progress.increment(step=i, item=item)
1650 1643 updated = len(actions[ACTION_GET])
1651 1644
1652 1645 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
1653 1646 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1654 1647
1655 1648 # forget (manifest only, just log it) (must come first)
1656 1649 for f, args, msg in actions[ACTION_FORGET]:
1657 1650 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1658 z += 1
1659 progress(_updating, z, item=f, total=numupdates, unit=_files)
1651 progress.increment(item=f)
1660 1652
1661 1653 # re-add (manifest only, just log it)
1662 1654 for f, args, msg in actions[ACTION_ADD]:
1663 1655 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1664 z += 1
1665 progress(_updating, z, item=f, total=numupdates, unit=_files)
1656 progress.increment(item=f)
1666 1657
1667 1658 # re-add/mark as modified (manifest only, just log it)
1668 1659 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1669 1660 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1670 z += 1
1671 progress(_updating, z, item=f, total=numupdates, unit=_files)
1661 progress.increment(item=f)
1672 1662
1673 1663 # keep (noop, just log it)
1674 1664 for f, args, msg in actions[ACTION_KEEP]:
1675 1665 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1676 1666 # no progress
1677 1667
1678 1668 # directory rename, move local
1679 1669 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1680 1670 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1681 z += 1
1682 progress(_updating, z, item=f, total=numupdates, unit=_files)
1671 progress.increment(item=f)
1683 1672 f0, flags = args
1684 1673 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1685 1674 wctx[f].audit()
1686 1675 wctx[f].write(wctx.filectx(f0).data(), flags)
1687 1676 wctx[f0].remove()
1688 1677 updated += 1
1689 1678
1690 1679 # local directory rename, get
1691 1680 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1692 1681 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1693 z += 1
1694 progress(_updating, z, item=f, total=numupdates, unit=_files)
1682 progress.increment(item=f)
1695 1683 f0, flags = args
1696 1684 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1697 1685 wctx[f].write(mctx.filectx(f0).data(), flags)
1698 1686 updated += 1
1699 1687
1700 1688 # exec
1701 1689 for f, args, msg in actions[ACTION_EXEC]:
1702 1690 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1703 z += 1
1704 progress(_updating, z, item=f, total=numupdates, unit=_files)
1691 progress.increment(item=f)
1705 1692 flags, = args
1706 1693 wctx[f].audit()
1707 1694 wctx[f].setflags('l' in flags, 'x' in flags)
1708 1695 updated += 1
1709 1696
1710 1697 # the ordering is important here -- ms.mergedriver will raise if the merge
1711 1698 # driver has changed, and we want to be able to bypass it when overwrite is
1712 1699 # True
1713 1700 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1714 1701
1715 1702 if usemergedriver:
1716 1703 if wctx.isinmemory():
1717 1704 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1718 1705 "support mergedriver")
1719 1706 ms.commit()
1720 1707 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1721 1708 # the driver might leave some files unresolved
1722 1709 unresolvedf = set(ms.unresolved())
1723 1710 if not proceed:
1724 1711 # XXX setting unresolved to at least 1 is a hack to make sure we
1725 1712 # error out
1726 1713 return updateresult(updated, merged, removed,
1727 1714 max(len(unresolvedf), 1))
1728 1715 newactions = []
1729 1716 for f, args, msg in mergeactions:
1730 1717 if f in unresolvedf:
1731 1718 newactions.append((f, args, msg))
1732 1719 mergeactions = newactions
1733 1720
1734 1721 try:
1735 1722 # premerge
1736 1723 tocomplete = []
1737 1724 for f, args, msg in mergeactions:
1738 1725 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1739 z += 1
1740 progress(_updating, z, item=f, total=numupdates, unit=_files)
1726 progress.increment(item=f)
1741 1727 if f == '.hgsubstate': # subrepo states need updating
1742 1728 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1743 1729 overwrite, labels)
1744 1730 continue
1745 1731 wctx[f].audit()
1746 1732 complete, r = ms.preresolve(f, wctx)
1747 1733 if not complete:
1748 1734 numupdates += 1
1749 1735 tocomplete.append((f, args, msg))
1750 1736
1751 1737 # merge
1752 1738 for f, args, msg in tocomplete:
1753 1739 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1754 z += 1
1755 progress(_updating, z, item=f, total=numupdates, unit=_files)
1740 progress.increment(item=f, total=numupdates)
1756 1741 ms.resolve(f, wctx)
1757 1742
1758 1743 finally:
1759 1744 ms.commit()
1760 1745
1761 1746 unresolved = ms.unresolvedcount()
1762 1747
1763 1748 if (usemergedriver and not unresolved
1764 1749 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
1765 1750 if not driverconclude(repo, ms, wctx, labels=labels):
1766 1751 # XXX setting unresolved to at least 1 is a hack to make sure we
1767 1752 # error out
1768 1753 unresolved = max(unresolved, 1)
1769 1754
1770 1755 ms.commit()
1771 1756
1772 1757 msupdated, msmerged, msremoved = ms.counts()
1773 1758 updated += msupdated
1774 1759 merged += msmerged
1775 1760 removed += msremoved
1776 1761
1777 1762 extraactions = ms.actions()
1778 1763 if extraactions:
1779 1764 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1780 1765 for k, acts in extraactions.iteritems():
1781 1766 actions[k].extend(acts)
1782 1767 # Remove these files from actions[ACTION_MERGE] as well. This is
1783 1768 # important because in recordupdates, files in actions[ACTION_MERGE]
1784 1769 # are processed after files in other actions, and the merge driver
1785 1770 # might add files to those actions via extraactions above. This can
1786 1771 # lead to a file being recorded twice, with poor results. This is
1787 1772 # especially problematic for actions[ACTION_REMOVE] (currently only
1788 1773 # possible with the merge driver in the initial merge process;
1789 1774 # interrupted merges don't go through this flow).
1790 1775 #
1791 1776 # The real fix here is to have indexes by both file and action so
1792 1777 # that when the action for a file is changed it is automatically
1793 1778 # reflected in the other action lists. But that involves a more
1794 1779 # complex data structure, so this will do for now.
1795 1780 #
1796 1781 # We don't need to do the same operation for 'dc' and 'cd' because
1797 1782 # those lists aren't consulted again.
1798 1783 mfiles.difference_update(a[0] for a in acts)
1799 1784
1800 1785 actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
1801 1786 if a[0] in mfiles]
1802 1787
1803 progress(_updating, None, total=numupdates, unit=_files)
1788 progress.update(None)
1804 1789 return updateresult(updated, merged, removed, unresolved)
1805 1790
1806 1791 def recordupdates(repo, actions, branchmerge):
1807 1792 "record merge actions to the dirstate"
1808 1793 # remove (must come first)
1809 1794 for f, args, msg in actions.get(ACTION_REMOVE, []):
1810 1795 if branchmerge:
1811 1796 repo.dirstate.remove(f)
1812 1797 else:
1813 1798 repo.dirstate.drop(f)
1814 1799
1815 1800 # forget (must come first)
1816 1801 for f, args, msg in actions.get(ACTION_FORGET, []):
1817 1802 repo.dirstate.drop(f)
1818 1803
1819 1804 # resolve path conflicts
1820 1805 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
1821 1806 f0, = args
1822 1807 origf0 = repo.dirstate.copied(f0) or f0
1823 1808 repo.dirstate.add(f)
1824 1809 repo.dirstate.copy(origf0, f)
1825 1810 if f0 == origf0:
1826 1811 repo.dirstate.remove(f0)
1827 1812 else:
1828 1813 repo.dirstate.drop(f0)
1829 1814
1830 1815 # re-add
1831 1816 for f, args, msg in actions.get(ACTION_ADD, []):
1832 1817 repo.dirstate.add(f)
1833 1818
1834 1819 # re-add/mark as modified
1835 1820 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
1836 1821 if branchmerge:
1837 1822 repo.dirstate.normallookup(f)
1838 1823 else:
1839 1824 repo.dirstate.add(f)
1840 1825
1841 1826 # exec change
1842 1827 for f, args, msg in actions.get(ACTION_EXEC, []):
1843 1828 repo.dirstate.normallookup(f)
1844 1829
1845 1830 # keep
1846 1831 for f, args, msg in actions.get(ACTION_KEEP, []):
1847 1832 pass
1848 1833
1849 1834 # get
1850 1835 for f, args, msg in actions.get(ACTION_GET, []):
1851 1836 if branchmerge:
1852 1837 repo.dirstate.otherparent(f)
1853 1838 else:
1854 1839 repo.dirstate.normal(f)
1855 1840
1856 1841 # merge
1857 1842 for f, args, msg in actions.get(ACTION_MERGE, []):
1858 1843 f1, f2, fa, move, anc = args
1859 1844 if branchmerge:
1860 1845 # We've done a branch merge, mark this file as merged
1861 1846 # so that we properly record the merger later
1862 1847 repo.dirstate.merge(f)
1863 1848 if f1 != f2: # copy/rename
1864 1849 if move:
1865 1850 repo.dirstate.remove(f1)
1866 1851 if f1 != f:
1867 1852 repo.dirstate.copy(f1, f)
1868 1853 else:
1869 1854 repo.dirstate.copy(f2, f)
1870 1855 else:
1871 1856 # We've update-merged a locally modified file, so
1872 1857 # we set the dirstate to emulate a normal checkout
1873 1858 # of that file some time in the past. Thus our
1874 1859 # merge will appear as a normal local file
1875 1860 # modification.
1876 1861 if f2 == f: # file not locally copied/moved
1877 1862 repo.dirstate.normallookup(f)
1878 1863 if move:
1879 1864 repo.dirstate.drop(f1)
1880 1865
1881 1866 # directory rename, move local
1882 1867 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
1883 1868 f0, flag = args
1884 1869 if branchmerge:
1885 1870 repo.dirstate.add(f)
1886 1871 repo.dirstate.remove(f0)
1887 1872 repo.dirstate.copy(f0, f)
1888 1873 else:
1889 1874 repo.dirstate.normal(f)
1890 1875 repo.dirstate.drop(f0)
1891 1876
1892 1877 # directory rename, get
1893 1878 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
1894 1879 f0, flag = args
1895 1880 if branchmerge:
1896 1881 repo.dirstate.add(f)
1897 1882 repo.dirstate.copy(f0, f)
1898 1883 else:
1899 1884 repo.dirstate.normal(f)
1900 1885
1901 1886 def update(repo, node, branchmerge, force, ancestor=None,
1902 1887 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1903 1888 updatecheck=None, wc=None):
1904 1889 """
1905 1890 Perform a merge between the working directory and the given node
1906 1891
1907 1892 node = the node to update to
1908 1893 branchmerge = whether to merge between branches
1909 1894 force = whether to force branch merging or file overwriting
1910 1895 matcher = a matcher to filter file lists (dirstate not updated)
1911 1896 mergeancestor = whether it is merging with an ancestor. If true,
1912 1897 we should accept the incoming changes for any prompts that occur.
1913 1898 If false, merging with an ancestor (fast-forward) is only allowed
1914 1899 between different named branches. This flag is used by rebase extension
1915 1900 as a temporary fix and should be avoided in general.
1916 1901 labels = labels to use for base, local and other
1917 1902 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1918 1903 this is True, then 'force' should be True as well.
1919 1904
1920 1905 The table below shows all the behaviors of the update command given the
1921 1906 -c/--check and -C/--clean or no options, whether the working directory is
1922 1907 dirty, whether a revision is specified, and the relationship of the parent
1923 1908 rev to the target rev (linear or not). Match from top first. The -n
1924 1909 option doesn't exist on the command line, but represents the
1925 1910 experimental.updatecheck=noconflict option.
1926 1911
1927 1912 This logic is tested by test-update-branches.t.
1928 1913
1929 1914 -c -C -n -m dirty rev linear | result
1930 1915 y y * * * * * | (1)
1931 1916 y * y * * * * | (1)
1932 1917 y * * y * * * | (1)
1933 1918 * y y * * * * | (1)
1934 1919 * y * y * * * | (1)
1935 1920 * * y y * * * | (1)
1936 1921 * * * * * n n | x
1937 1922 * * * * n * * | ok
1938 1923 n n n n y * y | merge
1939 1924 n n n n y y n | (2)
1940 1925 n n n y y * * | merge
1941 1926 n n y n y * * | merge if no conflict
1942 1927 n y n n y * * | discard
1943 1928 y n n n y * * | (3)
1944 1929
1945 1930 x = can't happen
1946 1931 * = don't-care
1947 1932 1 = incompatible options (checked in commands.py)
1948 1933 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1949 1934 3 = abort: uncommitted changes (checked in commands.py)
1950 1935
1951 1936 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1952 1937 to repo[None] if None is passed.
1953 1938
1954 1939 Return the same tuple as applyupdates().
1955 1940 """
1956 1941 # Avoid cycle.
1957 1942 from . import sparse
1958 1943
1959 1944 # This function used to find the default destination if node was None, but
1960 1945 # that's now in destutil.py.
1961 1946 assert node is not None
1962 1947 if not branchmerge and not force:
1963 1948 # TODO: remove the default once all callers that pass branchmerge=False
1964 1949 # and force=False pass a value for updatecheck. We may want to allow
1965 1950 # updatecheck='abort' to better suppport some of these callers.
1966 1951 if updatecheck is None:
1967 1952 updatecheck = 'linear'
1968 1953 assert updatecheck in ('none', 'linear', 'noconflict')
1969 1954 # If we're doing a partial update, we need to skip updating
1970 1955 # the dirstate, so make a note of any partial-ness to the
1971 1956 # update here.
1972 1957 if matcher is None or matcher.always():
1973 1958 partial = False
1974 1959 else:
1975 1960 partial = True
1976 1961 with repo.wlock():
1977 1962 if wc is None:
1978 1963 wc = repo[None]
1979 1964 pl = wc.parents()
1980 1965 p1 = pl[0]
1981 1966 pas = [None]
1982 1967 if ancestor is not None:
1983 1968 pas = [repo[ancestor]]
1984 1969
1985 1970 overwrite = force and not branchmerge
1986 1971
1987 1972 p2 = repo[node]
1988 1973 if pas[0] is None:
1989 1974 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1990 1975 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1991 1976 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1992 1977 else:
1993 1978 pas = [p1.ancestor(p2, warn=branchmerge)]
1994 1979
1995 1980 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1996 1981
1997 1982 ### check phase
1998 1983 if not overwrite:
1999 1984 if len(pl) > 1:
2000 1985 raise error.Abort(_("outstanding uncommitted merge"))
2001 1986 ms = mergestate.read(repo)
2002 1987 if list(ms.unresolved()):
2003 1988 raise error.Abort(_("outstanding merge conflicts"))
2004 1989 if branchmerge:
2005 1990 if pas == [p2]:
2006 1991 raise error.Abort(_("merging with a working directory ancestor"
2007 1992 " has no effect"))
2008 1993 elif pas == [p1]:
2009 1994 if not mergeancestor and wc.branch() == p2.branch():
2010 1995 raise error.Abort(_("nothing to merge"),
2011 1996 hint=_("use 'hg update' "
2012 1997 "or check 'hg heads'"))
2013 1998 if not force and (wc.files() or wc.deleted()):
2014 1999 raise error.Abort(_("uncommitted changes"),
2015 2000 hint=_("use 'hg status' to list changes"))
2016 2001 if not wc.isinmemory():
2017 2002 for s in sorted(wc.substate):
2018 2003 wc.sub(s).bailifchanged()
2019 2004
2020 2005 elif not overwrite:
2021 2006 if p1 == p2: # no-op update
2022 2007 # call the hooks and exit early
2023 2008 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
2024 2009 repo.hook('update', parent1=xp2, parent2='', error=0)
2025 2010 return updateresult(0, 0, 0, 0)
2026 2011
2027 2012 if (updatecheck == 'linear' and
2028 2013 pas not in ([p1], [p2])): # nonlinear
2029 2014 dirty = wc.dirty(missing=True)
2030 2015 if dirty:
2031 2016 # Branching is a bit strange to ensure we do the minimal
2032 2017 # amount of call to obsutil.foreground.
2033 2018 foreground = obsutil.foreground(repo, [p1.node()])
2034 2019 # note: the <node> variable contains a random identifier
2035 2020 if repo[node].node() in foreground:
2036 2021 pass # allow updating to successors
2037 2022 else:
2038 2023 msg = _("uncommitted changes")
2039 2024 hint = _("commit or update --clean to discard changes")
2040 2025 raise error.UpdateAbort(msg, hint=hint)
2041 2026 else:
2042 2027 # Allow jumping branches if clean and specific rev given
2043 2028 pass
2044 2029
2045 2030 if overwrite:
2046 2031 pas = [wc]
2047 2032 elif not branchmerge:
2048 2033 pas = [p1]
2049 2034
2050 2035 # deprecated config: merge.followcopies
2051 2036 followcopies = repo.ui.configbool('merge', 'followcopies')
2052 2037 if overwrite:
2053 2038 followcopies = False
2054 2039 elif not pas[0]:
2055 2040 followcopies = False
2056 2041 if not branchmerge and not wc.dirty(missing=True):
2057 2042 followcopies = False
2058 2043
2059 2044 ### calculate phase
2060 2045 actionbyfile, diverge, renamedelete = calculateupdates(
2061 2046 repo, wc, p2, pas, branchmerge, force, mergeancestor,
2062 2047 followcopies, matcher=matcher, mergeforce=mergeforce)
2063 2048
2064 2049 if updatecheck == 'noconflict':
2065 2050 for f, (m, args, msg) in actionbyfile.iteritems():
2066 2051 if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
2067 2052 ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
2068 2053 msg = _("conflicting changes")
2069 2054 hint = _("commit or update --clean to discard changes")
2070 2055 raise error.Abort(msg, hint=hint)
2071 2056
2072 2057 # Prompt and create actions. Most of this is in the resolve phase
2073 2058 # already, but we can't handle .hgsubstate in filemerge or
2074 2059 # subrepoutil.submerge yet so we have to keep prompting for it.
2075 2060 if '.hgsubstate' in actionbyfile:
2076 2061 f = '.hgsubstate'
2077 2062 m, args, msg = actionbyfile[f]
2078 2063 prompts = filemerge.partextras(labels)
2079 2064 prompts['f'] = f
2080 2065 if m == ACTION_CHANGED_DELETED:
2081 2066 if repo.ui.promptchoice(
2082 2067 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
2083 2068 "use (c)hanged version or (d)elete?"
2084 2069 "$$ &Changed $$ &Delete") % prompts, 0):
2085 2070 actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
2086 2071 elif f in p1:
2087 2072 actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
2088 2073 else:
2089 2074 actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
2090 2075 elif m == ACTION_DELETED_CHANGED:
2091 2076 f1, f2, fa, move, anc = args
2092 2077 flags = p2[f2].flags()
2093 2078 if repo.ui.promptchoice(
2094 2079 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
2095 2080 "use (c)hanged version or leave (d)eleted?"
2096 2081 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
2097 2082 actionbyfile[f] = (ACTION_GET, (flags, False),
2098 2083 'prompt recreating')
2099 2084 else:
2100 2085 del actionbyfile[f]
2101 2086
2102 2087 # Convert to dictionary-of-lists format
2103 2088 actions = dict((m, [])
2104 2089 for m in (
2105 2090 ACTION_ADD,
2106 2091 ACTION_ADD_MODIFIED,
2107 2092 ACTION_FORGET,
2108 2093 ACTION_GET,
2109 2094 ACTION_CHANGED_DELETED,
2110 2095 ACTION_DELETED_CHANGED,
2111 2096 ACTION_REMOVE,
2112 2097 ACTION_DIR_RENAME_MOVE_LOCAL,
2113 2098 ACTION_LOCAL_DIR_RENAME_GET,
2114 2099 ACTION_MERGE,
2115 2100 ACTION_EXEC,
2116 2101 ACTION_KEEP,
2117 2102 ACTION_PATH_CONFLICT,
2118 2103 ACTION_PATH_CONFLICT_RESOLVE))
2119 2104 for f, (m, args, msg) in actionbyfile.iteritems():
2120 2105 if m not in actions:
2121 2106 actions[m] = []
2122 2107 actions[m].append((f, args, msg))
2123 2108
2124 2109 if not util.fscasesensitive(repo.path):
2125 2110 # check collision between files only in p2 for clean update
2126 2111 if (not branchmerge and
2127 2112 (force or not wc.dirty(missing=True, branch=False))):
2128 2113 _checkcollision(repo, p2.manifest(), None)
2129 2114 else:
2130 2115 _checkcollision(repo, wc.manifest(), actions)
2131 2116
2132 2117 # divergent renames
2133 2118 for f, fl in sorted(diverge.iteritems()):
2134 2119 repo.ui.warn(_("note: possible conflict - %s was renamed "
2135 2120 "multiple times to:\n") % f)
2136 2121 for nf in fl:
2137 2122 repo.ui.warn(" %s\n" % nf)
2138 2123
2139 2124 # rename and delete
2140 2125 for f, fl in sorted(renamedelete.iteritems()):
2141 2126 repo.ui.warn(_("note: possible conflict - %s was deleted "
2142 2127 "and renamed to:\n") % f)
2143 2128 for nf in fl:
2144 2129 repo.ui.warn(" %s\n" % nf)
2145 2130
2146 2131 ### apply phase
2147 2132 if not branchmerge: # just jump to the new rev
2148 2133 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2149 2134 if not partial and not wc.isinmemory():
2150 2135 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2151 2136 # note that we're in the middle of an update
2152 2137 repo.vfs.write('updatestate', p2.hex())
2153 2138
2154 2139 # Advertise fsmonitor when its presence could be useful.
2155 2140 #
2156 2141 # We only advertise when performing an update from an empty working
2157 2142 # directory. This typically only occurs during initial clone.
2158 2143 #
2159 2144 # We give users a mechanism to disable the warning in case it is
2160 2145 # annoying.
2161 2146 #
2162 2147 # We only allow on Linux and MacOS because that's where fsmonitor is
2163 2148 # considered stable.
2164 2149 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2165 2150 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2166 2151 'warn_update_file_count')
2167 2152 try:
2168 2153 # avoid cycle: extensions -> cmdutil -> merge
2169 2154 from . import extensions
2170 2155 extensions.find('fsmonitor')
2171 2156 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2172 2157 # We intentionally don't look at whether fsmonitor has disabled
2173 2158 # itself because a) fsmonitor may have already printed a warning
2174 2159 # b) we only care about the config state here.
2175 2160 except KeyError:
2176 2161 fsmonitorenabled = False
2177 2162
2178 2163 if (fsmonitorwarning
2179 2164 and not fsmonitorenabled
2180 2165 and p1.node() == nullid
2181 2166 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2182 2167 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2183 2168 repo.ui.warn(
2184 2169 _('(warning: large working directory being used without '
2185 2170 'fsmonitor enabled; enable fsmonitor to improve performance; '
2186 2171 'see "hg help -e fsmonitor")\n'))
2187 2172
2188 2173 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2189 2174
2190 2175 if not partial and not wc.isinmemory():
2191 2176 with repo.dirstate.parentchange():
2192 2177 repo.setparents(fp1, fp2)
2193 2178 recordupdates(repo, actions, branchmerge)
2194 2179 # update completed, clear state
2195 2180 util.unlink(repo.vfs.join('updatestate'))
2196 2181
2197 2182 if not branchmerge:
2198 2183 repo.dirstate.setbranch(p2.branch())
2199 2184
2200 2185 # If we're updating to a location, clean up any stale temporary includes
2201 2186 # (ex: this happens during hg rebase --abort).
2202 2187 if not branchmerge:
2203 2188 sparse.prunetemporaryincludes(repo)
2204 2189
2205 2190 if not partial:
2206 2191 repo.hook('update', parent1=xp1, parent2=xp2,
2207 2192 error=stats.unresolvedcount)
2208 2193 return stats
2209 2194
2210 2195 def graft(repo, ctx, pctx, labels, keepparent=False):
2211 2196 """Do a graft-like merge.
2212 2197
2213 2198 This is a merge where the merge ancestor is chosen such that one
2214 2199 or more changesets are grafted onto the current changeset. In
2215 2200 addition to the merge, this fixes up the dirstate to include only
2216 2201 a single parent (if keepparent is False) and tries to duplicate any
2217 2202 renames/copies appropriately.
2218 2203
2219 2204 ctx - changeset to rebase
2220 2205 pctx - merge base, usually ctx.p1()
2221 2206 labels - merge labels eg ['local', 'graft']
2222 2207 keepparent - keep second parent if any
2223 2208
2224 2209 """
2225 2210 # If we're grafting a descendant onto an ancestor, be sure to pass
2226 2211 # mergeancestor=True to update. This does two things: 1) allows the merge if
2227 2212 # the destination is the same as the parent of the ctx (so we can use graft
2228 2213 # to copy commits), and 2) informs update that the incoming changes are
2229 2214 # newer than the destination so it doesn't prompt about "remote changed foo
2230 2215 # which local deleted".
2231 2216 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2232 2217
2233 2218 stats = update(repo, ctx.node(), True, True, pctx.node(),
2234 2219 mergeancestor=mergeancestor, labels=labels)
2235 2220
2236 2221 pother = nullid
2237 2222 parents = ctx.parents()
2238 2223 if keepparent and len(parents) == 2 and pctx in parents:
2239 2224 parents.remove(pctx)
2240 2225 pother = parents[0].node()
2241 2226
2242 2227 with repo.dirstate.parentchange():
2243 2228 repo.setparents(repo['.'].node(), pother)
2244 2229 repo.dirstate.write(repo.currenttransaction())
2245 2230 # fix up dirstate for copies and renames
2246 2231 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2247 2232 return stats
@@ -1,1615 +1,1636 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
57 57 class status(tuple):
58 58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
59 59 and 'ignored' properties are only relevant to the working copy.
60 60 '''
61 61
62 62 __slots__ = ()
63 63
64 64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
65 65 clean):
66 66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
67 67 ignored, clean))
68 68
69 69 @property
70 70 def modified(self):
71 71 '''files that have been modified'''
72 72 return self[0]
73 73
74 74 @property
75 75 def added(self):
76 76 '''files that have been added'''
77 77 return self[1]
78 78
79 79 @property
80 80 def removed(self):
81 81 '''files that have been removed'''
82 82 return self[2]
83 83
84 84 @property
85 85 def deleted(self):
86 86 '''files that are in the dirstate, but have been deleted from the
87 87 working copy (aka "missing")
88 88 '''
89 89 return self[3]
90 90
91 91 @property
92 92 def unknown(self):
93 93 '''files not in the dirstate that are not ignored'''
94 94 return self[4]
95 95
96 96 @property
97 97 def ignored(self):
98 98 '''files not in the dirstate that are ignored (by _dirignore())'''
99 99 return self[5]
100 100
101 101 @property
102 102 def clean(self):
103 103 '''files that have not been modified'''
104 104 return self[6]
105 105
106 106 def __repr__(self, *args, **kwargs):
107 107 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
108 108 r'unknown=%s, ignored=%s, clean=%s>') %
109 109 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
110 110
111 111 def itersubrepos(ctx1, ctx2):
112 112 """find subrepos in ctx1 or ctx2"""
113 113 # Create a (subpath, ctx) mapping where we prefer subpaths from
114 114 # ctx1. The subpaths from ctx2 are important when the .hgsub file
115 115 # has been modified (in ctx2) but not yet committed (in ctx1).
116 116 subpaths = dict.fromkeys(ctx2.substate, ctx2)
117 117 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
118 118
119 119 missing = set()
120 120
121 121 for subpath in ctx2.substate:
122 122 if subpath not in ctx1.substate:
123 123 del subpaths[subpath]
124 124 missing.add(subpath)
125 125
126 126 for subpath, ctx in sorted(subpaths.iteritems()):
127 127 yield subpath, ctx.sub(subpath)
128 128
129 129 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
130 130 # status and diff will have an accurate result when it does
131 131 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
132 132 # against itself.
133 133 for subpath in missing:
134 134 yield subpath, ctx2.nullsub(subpath, ctx1)
135 135
136 136 def nochangesfound(ui, repo, excluded=None):
137 137 '''Report no changes for push/pull, excluded is None or a list of
138 138 nodes excluded from the push/pull.
139 139 '''
140 140 secretlist = []
141 141 if excluded:
142 142 for n in excluded:
143 143 ctx = repo[n]
144 144 if ctx.phase() >= phases.secret and not ctx.extinct():
145 145 secretlist.append(n)
146 146
147 147 if secretlist:
148 148 ui.status(_("no changes found (ignored %d secret changesets)\n")
149 149 % len(secretlist))
150 150 else:
151 151 ui.status(_("no changes found\n"))
152 152
153 153 def callcatch(ui, func):
154 154 """call func() with global exception handling
155 155
156 156 return func() if no exception happens. otherwise do some error handling
157 157 and return an exit code accordingly. does not handle all exceptions.
158 158 """
159 159 try:
160 160 try:
161 161 return func()
162 162 except: # re-raises
163 163 ui.traceback()
164 164 raise
165 165 # Global exception handling, alphabetically
166 166 # Mercurial-specific first, followed by built-in and library exceptions
167 167 except error.LockHeld as inst:
168 168 if inst.errno == errno.ETIMEDOUT:
169 169 reason = _('timed out waiting for lock held by %r') % inst.locker
170 170 else:
171 171 reason = _('lock held by %r') % inst.locker
172 172 ui.warn(_("abort: %s: %s\n")
173 173 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
174 174 if not inst.locker:
175 175 ui.warn(_("(lock might be very busy)\n"))
176 176 except error.LockUnavailable as inst:
177 177 ui.warn(_("abort: could not lock %s: %s\n") %
178 178 (inst.desc or stringutil.forcebytestr(inst.filename),
179 179 encoding.strtolocal(inst.strerror)))
180 180 except error.OutOfBandError as inst:
181 181 if inst.args:
182 182 msg = _("abort: remote error:\n")
183 183 else:
184 184 msg = _("abort: remote error\n")
185 185 ui.warn(msg)
186 186 if inst.args:
187 187 ui.warn(''.join(inst.args))
188 188 if inst.hint:
189 189 ui.warn('(%s)\n' % inst.hint)
190 190 except error.RepoError as inst:
191 191 ui.warn(_("abort: %s!\n") % inst)
192 192 if inst.hint:
193 193 ui.warn(_("(%s)\n") % inst.hint)
194 194 except error.ResponseError as inst:
195 195 ui.warn(_("abort: %s") % inst.args[0])
196 196 msg = inst.args[1]
197 197 if isinstance(msg, type(u'')):
198 198 msg = pycompat.sysbytes(msg)
199 199 if not isinstance(msg, bytes):
200 200 ui.warn(" %r\n" % (msg,))
201 201 elif not msg:
202 202 ui.warn(_(" empty string\n"))
203 203 else:
204 204 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
205 205 except error.CensoredNodeError as inst:
206 206 ui.warn(_("abort: file censored %s!\n") % inst)
207 207 except error.RevlogError as inst:
208 208 ui.warn(_("abort: %s!\n") % inst)
209 209 except error.InterventionRequired as inst:
210 210 ui.warn("%s\n" % inst)
211 211 if inst.hint:
212 212 ui.warn(_("(%s)\n") % inst.hint)
213 213 return 1
214 214 except error.WdirUnsupported:
215 215 ui.warn(_("abort: working directory revision cannot be specified\n"))
216 216 except error.Abort as inst:
217 217 ui.warn(_("abort: %s\n") % inst)
218 218 if inst.hint:
219 219 ui.warn(_("(%s)\n") % inst.hint)
220 220 except ImportError as inst:
221 221 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
222 222 m = stringutil.forcebytestr(inst).split()[-1]
223 223 if m in "mpatch bdiff".split():
224 224 ui.warn(_("(did you forget to compile extensions?)\n"))
225 225 elif m in "zlib".split():
226 226 ui.warn(_("(is your Python install correct?)\n"))
227 227 except IOError as inst:
228 228 if util.safehasattr(inst, "code"):
229 229 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
230 230 elif util.safehasattr(inst, "reason"):
231 231 try: # usually it is in the form (errno, strerror)
232 232 reason = inst.reason.args[1]
233 233 except (AttributeError, IndexError):
234 234 # it might be anything, for example a string
235 235 reason = inst.reason
236 236 if isinstance(reason, pycompat.unicode):
237 237 # SSLError of Python 2.7.9 contains a unicode
238 238 reason = encoding.unitolocal(reason)
239 239 ui.warn(_("abort: error: %s\n") % reason)
240 240 elif (util.safehasattr(inst, "args")
241 241 and inst.args and inst.args[0] == errno.EPIPE):
242 242 pass
243 243 elif getattr(inst, "strerror", None):
244 244 if getattr(inst, "filename", None):
245 245 ui.warn(_("abort: %s: %s\n") % (
246 246 encoding.strtolocal(inst.strerror),
247 247 stringutil.forcebytestr(inst.filename)))
248 248 else:
249 249 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
250 250 else:
251 251 raise
252 252 except OSError as inst:
253 253 if getattr(inst, "filename", None) is not None:
254 254 ui.warn(_("abort: %s: '%s'\n") % (
255 255 encoding.strtolocal(inst.strerror),
256 256 stringutil.forcebytestr(inst.filename)))
257 257 else:
258 258 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
259 259 except MemoryError:
260 260 ui.warn(_("abort: out of memory\n"))
261 261 except SystemExit as inst:
262 262 # Commands shouldn't sys.exit directly, but give a return code.
263 263 # Just in case catch this and and pass exit code to caller.
264 264 return inst.code
265 265 except socket.error as inst:
266 266 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
267 267
268 268 return -1
269 269
def checknewlabel(repo, lbl, kind):
    """Validate a new label name (bookmark, branch, tag), aborting if invalid.

    Rejects reserved names, forbidden characters, pure-integer names and
    names with surrounding whitespace.
    """
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for ch in (':', '\0', '\n', '\r'):
        if ch in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(ch))
    isint = True
    try:
        int(lbl)
    except ValueError:
        isint = False
    if isint:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286 286
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for banned in ('\r', '\n'):
        if banned in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                              % pycompat.bytestr(f))
292 292
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
304 304
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    parsed = stringutil.parsebool(val)
    # on Windows non-portable names are always fatal
    abort = pycompat.iswindows or lval == 'abort'
    warn = parsed or lval == 'warn'
    badvalue = parsed is None and not (warn or abort or lval == 'ignore')
    if badvalue:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
317 317
class casecollisionauditor(object):
    """Warn or abort when a new filename case-folds onto a tracked one."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-cased view of every tracked path, for case-fold comparison
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        collides = fl in self._loweredfiles and f not in self._dirstate
        if collides:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
341 341
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
365 365
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    # Re-raise walk errors only for the root path itself; errors on
    # deeper entries are silently skipped by os.walk.
    def errhandler(err):
        if err.filename == path:
            raise err
    # samestat may be missing on some platforms; without it we cannot
    # detect symlink cycles, so symlink following is disabled below.
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst; return True only if it was
            # not already present (i.e. this directory is new to us).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                # only descend into directories we have not seen before
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink target recursively,
                        # sharing seen_dirs to avoid revisiting
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
409 409
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    # the working directory has no real node; substitute the wdir id
    return wdirid if n is None else n
416 416
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    r = ctx.rev()
    # the working directory has no real rev; substitute the wdir rev
    return wdirrev if r is None else r
424 424
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
430 430
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full 40-char hash in debug mode, short hash otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
438 438
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a hex nodeid prefix to a full binary node, or None.

    Raises if the resolved node is filtered in the current repoview.
    """
    # Uses unfiltered repo because it's faster when prefix is ambiguous.
    # This matches the shortesthexnodeidprefix() function below.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
447 447
def shortesthexnodeidprefix(repo, node, minlength=1):
    """Find the shortest unambiguous prefix that matches hexnode."""
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = repo.unfiltered().changelog

    def isrev(prefix):
        # True if prefix could be mistaken for a revision number
        try:
            i = int(prefix)
            # if we are a pure int, then starting with zero will not be
            # confused as a rev; or, obviously, if the int is larger
            # than the value of the tip rev
            if prefix[0] == '0' or i > len(cl):
                return False
            return True
        except ValueError:
            return False

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        # grow the prefix until it no longer parses as a valid revnum
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not isrev(prefix):
                return prefix

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        # node is unknown to the (unfiltered) changelog
        raise error.RepoLookupError()
479 479
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.LookupError if the symbol is an
    ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
491 491
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".

    Lookup order: special names, revision number, full hex nodeid,
    registered names (bookmarks etc.), and finally hex nodeid prefix.
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            # reject strings like '012' whose int form differs textually
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            # negative numbers count back from tip
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            # full hex nodeid
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # translate filtered-lookup failures into a user-facing error
        raise _filterederror(repo, symbol)
552 552
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check if the changeset is obsolete
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    # If the changeset is obsolete, enrich the message with the reason
    # that made this changeset not visible
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
577 577
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a context, falling back to ``default``."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
586 586
def _pairspec(revspec):
    # True if the top-level revset operator is any range form
    rangetypes = ('range', 'rangepre', 'rangepost', 'rangeall')
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangetypes
590 590
def revpair(repo, revs):
    """Resolve revision specs to a (first, second) pair of contexts.

    With no specs, returns ('.', working directory).  The endpoints are
    taken from the smartset's ordering when it is monotonic, otherwise
    from its first/last elements.
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # one of several specs resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
620 620
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # wrap bare integers in a rev() expression; pass strings through
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
648 648
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # in debug mode always show both slots, padding with null
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx) - 1:
        # parent is the immediately preceding rev: not interesting
        return []
    return ps
664 664
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind: pass through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # glob matched nothing: keep the literal pattern
            ret.append(kindpat)
    return ret
683 683
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    if badfn is None:
        # default handler; note 'm' is bound below, resolved at call time
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
708 708
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, ignoredpats = matchandpats(ctx, pats, opts, globbed, default,
                                  badfn=badfn)
    return m
713 713
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
717 717
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
721 721
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # plain path: canonicalize against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a real pattern must match exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
735 735
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (walk ancestors deepest-first; only the first conflicting
        # file needs removing, hence the break)
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        # a directory occupies the backup file's own path: clear it
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
771 771
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        rev = self._torev(node)
        return self._revcontains(rev)
781 781
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            # obsolescence disabled: fall back to stripping the old nodes
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
875 875
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing ones, detecting renames by similarity.

    Recurses into subrepos when requested or matched.  Returns 1 if any
    explicitly named file was rejected or a subrepo addremove failed,
    otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    # normalize percentage to a 0..1 fraction for _findrenames
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # handle subrepos first, narrowing the matcher to each one
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only report files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
935 935
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # badfn closes over 'rejected', which is assigned on the next line;
    # the callback only fires later, so the ordering is safe
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # unlike addremove(), changes are always recorded (no dry-run mode)
    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
964 964
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # st is the stat result (or falsy when the file is gone from disk);
    # dstate is the one-letter dirstate code ('?', 'r', 'a', 'n', 'm')
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked (or audit-rejected unknown) but missing on disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
993 993
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns {new: old} for pairs at or above the similarity threshold.
    '''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        interesting = (repo.ui.verbose or not matcher.exact(old)
                       or not matcher.exact(new))
        if interesting:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1008 1008
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
1018 1018
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm'/'n' mean already merged/normal; anything else needs a
        # normallookup so the copy annotation is dropped
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # copying an added (uncommitted) file: no copy data to record
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1037 1037
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry means a corrupt file
        if not r or not r[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1056 1056
def writerequires(opener, requirements):
    """Write the sorted requirements, one per line, via the vfs opener."""
    with opener('requires', 'w') as fp:
        lines = ["%s\n" % r for r in sorted(requirements)]
        fp.writelines(lines)
1061 1061
class filecachesubentry(object):
    """Track one path's stat so callers can ask whether it changed.

    cacheability is tri-state: True/False once known, None until the
    first successful stat tells us.
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when the path is (or may still be) cacheable
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; other errors propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1116 1116
class filecacheentry(object):
    """Aggregates one ``filecachesubentry`` per tracked path."""

    def __init__(self, paths, stat=True):
        # build one sub-entry per path; ``stat`` controls whether the file
        # is stat'ed immediately (see filecachesubentry.__init__)
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        """True if any tracked file has changed (short-circuits)."""
        return any(subentry.changed() for subentry in self._entries)

    def refresh(self):
        """Re-stat every tracked file."""
        for subentry in self._entries:
            subentry.refresh()
1133 1133
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function plus its name in
        # both native-str (for __dict__) and bytes (for _filecache) forms
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # (a value in obj.__dict__ shadows the check until invalidated)
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # explicit assignment replaces the cached value without re-stating
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate: next __get__ re-runs the changed() check
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1213 1213
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.

    Raises error.Abort when the source is not configured or the shell
    command exits non-zero.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess / close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1268 1268
1269 1269 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1270 1270 if lock is None:
1271 1271 raise error.LockInheritanceContractViolation(
1272 1272 'lock can only be inherited while held')
1273 1273 if environ is None:
1274 1274 environ = {}
1275 1275 with lock.inherit() as locker:
1276 1276 environ[envvar] = locker
1277 1277 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1278 1278
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1287 1287
class progress(object):
    """Stateful wrapper around ``ui.progress()``.

    Remembers the current position so callers can advance with
    ``increment()`` instead of tracking absolute positions themselves.
    """

    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.topic = topic
        self.unit = unit
        self.total = total
        self.pos = 0

    def update(self, pos, item="", total=None):
        """Jump to absolute position ``pos`` (optionally adjusting the
        total) and redraw."""
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        """Advance the current position by ``step`` and redraw."""
        self.update(self.pos + step, item, total)

    def _print(self, item):
        # delegate the actual rendering to the ui layer
        self.ui.progress(self.topic, self.pos, item, self.unit, self.total)
1308
def gdinitconfig(ui):
    """Return whether a new repository should be created using the
    general-delta format."""
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1294 1315
def gddeltaconfig(ui):
    """Return whether incoming deltas should be optimised, i.e. whether
    the general-delta format is enabled."""
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1300 1321
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file.

        When 'firstlinenonkeyval' is True, the file's first line is not
        parsed as key=value but returned verbatim under the __firstline
        key. Raises error.CorruptedState on malformed content.
        """
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # record the first line without its trailing '\n'
            result[self.firstlinekey] = lines[0][:-1]
            lines = lines[1:]
        try:
            # lines holding only a '\n' are skipped; everything else must
            # split into a (key, value) pair
            pairs = [line[:-1].split('=', 1) for line in lines
                     if line.strip()]
            parsed = dict(pairs)
            if self.firstlinekey in parsed:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            result.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return result

    def write(self, data, firstline=None):
        """Write a key=>value mapping to the file.

        data is a dict. Keys must be alphanumerical and start with a
        letter; values must not contain newline characters. When
        'firstline' is not None it is written verbatim before everything
        else (not in key=value form)."""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError("key name '%s' is reserved"
                                             % self.firstlinekey)
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1369 1390
# transaction names (matched by prefix in registersummarycallback's txmatch)
# after which obsoleted changesets are reported to the user
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction names (matched by prefix) after which newly added changesets
# and phase changes are reported to the user
_reportnewcssource = [
    'pull',
    'unbundle',
]
1382 1403
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1395 1416
# a list of (repo, revs, match) prefetch functions, invoked by
# prefetchfiles() above
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
# (this module's registersummarycallback reports instabilities itself)
_reportstroubledchangesets = True
1401 1422
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Which summaries get registered depends on ``txnname`` (matched by
    prefix against the module-level source lists) and on configuration.
    """
    def txmatch(sources):
        # transaction names are matched by prefix, e.g. 'pull' also matches
        # 'pull ...' transaction names
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot counts now; the callback compares against them post-close
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            newrevs = tr.changes.get('revs', xrange(0, 0))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev not in newrevs
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1503 1524
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of ``nodes`` as short hashes.

    At most ``maxnumnodes`` hashes are listed; longer lists are
    abbreviated as "<hashes> and N others", except in verbose mode where
    the full list is always shown.
    """
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1509 1530
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1524 1545
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # default implementation: hand the sink back untouched; extensions
    # monkeypatch this function to interpose their own wrapper
    return sink
1530 1551
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # directaccess is opt-in; without it (or without any filter) do nothing
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1573 1594
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            # a purely numeric symbol in range is only treated as a revision
            # number when directaccess.revnums is enabled; otherwise it is
            # not retried as a hash prefix either
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden == present unfiltered but absent filtered
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # fall through: try the symbol as a hex node-id prefix
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
1607 1628
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark

    That is: the ancestors of the bookmarked changeset, excluding
    ancestors of other heads and of other bookmarks, so only revisions
    "owned" by ``mark`` remain.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
@@ -1,1869 +1,1873 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import getpass
14 14 import inspect
15 15 import os
16 16 import re
17 17 import signal
18 18 import socket
19 19 import subprocess
20 20 import sys
21 21 import traceback
22 22
23 23 from .i18n import _
24 24 from .node import hex
25 25
26 26 from . import (
27 27 color,
28 28 config,
29 29 configitems,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 progress,
34 34 pycompat,
35 35 rcutil,
36 36 scmutil,
37 37 util,
38 38 )
39 39 from .utils import (
40 40 dateutil,
41 41 procutil,
42 42 stringutil,
43 43 )
44 44
# urllib request/error compatibility layer re-exported from util
urlreq = util.urlreq

# for use with str.translate(None, _keepalnum), to keep just alphanumerics
_keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
                     if not c.isalnum())

# The config knobs that will be altered (if unset) by ui.tweakdefaults.
tweakrc = b"""
[ui]
# The rollback command is dangerous. As a rule, don't use it.
rollback = False
# Make `hg status` report copy information
statuscopies = yes
# Prefer curses UIs when available. Revert to plain-text with `text`.
interface = curses

[commands]
# Make `hg status` emit cwd-relative paths by default.
status.relative = yes
# Refuse to perform an `hg update` that would cause a file content merge
update.check = noconflict
# Show conflicts information in `hg status`
status.verbose = True
# Collapse entire directories that contain only unknown files
status.terse = u

[diff]
git = 1
showfunc = 1
"""
75 75
# Sample hgrc file contents, keyed by configuration scope. The 'cloned'
# entry carries a %s placeholder for the clone source path.
samplehgrcs = {
    'user':
b"""# example user config (see 'hg help config' for more info)
[ui]
# name and email, e.g.
# username = Jane Doe <jdoe@example.com>
username =

# We recommend enabling tweakdefaults to get slight improvements to
# the UI over time. Make sure to set HGPLAIN in the environment when
# writing scripts!
# tweakdefaults = True

# uncomment to disable color in command output
# (see 'hg help color' for details)
# color = never

# uncomment to disable command output pagination
# (see 'hg help pager' for details)
# paginate = never

[extensions]
# uncomment these lines to enable some popular extensions
# (see 'hg help extensions' for more info)
#
# churn =
""",

    'cloned':
b"""# example repository config (see 'hg help config' for more info)
[paths]
default = %s

# path aliases to other clones of this repo in URLs or filesystem paths
# (see 'hg help config.paths' for more info)
#
# default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork         = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone        = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'local':
b"""# example repository config (see 'hg help config' for more info)
[paths]
# path aliases to other clones of this repo in URLs or filesystem paths
# (see 'hg help config.paths' for more info)
#
# default         = http://example.com/hg/example-repo
# default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork         = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone        = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'global':
b"""# example system-wide hg config (see 'hg help config' for more info)

[ui]
# uncomment to disable color in command output
# (see 'hg help color' for details)
# color = never

# uncomment to disable command output pagination
# (see 'hg help pager' for details)
# paginate = never

[extensions]
# uncomment these lines to enable some popular extensions
# (see 'hg help extensions' for more info)
#
# blackbox =
# churn =
""",
}
157 157
def _maybestrurl(maybebytes):
    """Recursively convert bytes URL value(s) to native str via
    util.rapply/pycompat.strurl."""
    return util.rapply(pycompat.strurl, maybebytes)
160 160
def _maybebytesurl(maybestr):
    """Inverse of _maybestrurl: recursively convert native str URL
    value(s) back to bytes via util.rapply/pycompat.bytesurl."""
    return util.rapply(pycompat.bytesurl, maybestr)
163 163
class httppasswordmgrdbproxy(object):
    """Delays loading urllib2 until it's needed.

    Proxies add_password/find_user_password to a lazily created
    password manager, converting arguments to native str and results
    back to bytes.
    """

    def __init__(self):
        self._mgr = None

    def _get_mgr(self):
        # create the real password manager on first use only
        mgr = self._mgr
        if mgr is None:
            mgr = self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
        return mgr

    def add_password(self, realm, uris, user, passwd):
        mgr = self._get_mgr()
        return mgr.add_password(
            _maybestrurl(realm), _maybestrurl(uris),
            _maybestrurl(user), _maybestrurl(passwd))

    def find_user_password(self, realm, uri):
        found = self._get_mgr().find_user_password(
            _maybestrurl(realm), _maybestrurl(uri))
        return _maybebytesurl(found)
183 183
def _catchterm(*args):
    """Signal handler that raises error.SignalInterrupt.

    NOTE(review): the name suggests it is installed for SIGTERM; the
    installation site is outside this file section — confirm at callers.
    """
    raise error.SignalInterrupt
186 186
# unique sentinel object used to detect that no default value has been
# provided when retrieving a configuration value (so None can be a real
# default)
_unset = object()

# _reqexithandlers: callbacks run at the end of a request
_reqexithandlers = []
193 193
194 194 class ui(object):
    def __init__(self, src=None):
        """Create a fresh new ui object if no src given

        Use uimod.ui.load() to create a ui which knows global and user configs.
        In most cases, you should use ui.copy() to create a copy of an existing
        ui object.
        """
        # _buffers: used for temporary capture of output
        self._buffers = []
        # 3-tuple describing how each buffer in the stack behaves.
        # Values are (capture stderr, capture subprocesses, apply labels).
        self._bufferstates = []
        # When a buffer is active, defines whether we are expanding labels.
        # This exists to prevent an extra list lookup.
        self._bufferapplylabels = None
        self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
        self._reportuntrusted = True
        self._knownconfig = configitems.coreitems
        self._ocfg = config.config() # overlay
        self._tcfg = config.config() # trusted
        self._ucfg = config.config() # untrusted
        self._trustusers = set()
        self._trustgroups = set()
        self.callhooks = True
        # Insecure server connections requested.
        self.insecureconnections = False
        # Blocked time
        self.logblockedtimes = False
        # color mode: see mercurial/color.py for possible value
        self._colormode = None
        self._terminfoparams = {}
        self._styles = {}

        if src:
            # copy-constructor path: share the streams and environment,
            # deep-copy the mutable configuration state
            self.fout = src.fout
            self.ferr = src.ferr
            self.fin = src.fin
            self.pageractive = src.pageractive
            self._disablepager = src._disablepager
            self._tweaked = src._tweaked

            self._tcfg = src._tcfg.copy()
            self._ucfg = src._ucfg.copy()
            self._ocfg = src._ocfg.copy()
            self._trustusers = src._trustusers.copy()
            self._trustgroups = src._trustgroups.copy()
            self.environ = src.environ
            self.callhooks = src.callhooks
            self.insecureconnections = src.insecureconnections
            self._colormode = src._colormode
            self._terminfoparams = src._terminfoparams.copy()
            self._styles = src._styles.copy()

            self.fixconfig()

            self.httppasswordmgrdb = src.httppasswordmgrdb
            self._blockedtimes = src._blockedtimes
        else:
            self.fout = procutil.stdout
            self.ferr = procutil.stderr
            self.fin = procutil.stdin
            self.pageractive = False
            self._disablepager = False
            self._tweaked = False

            # shared read-only environment
            self.environ = encoding.environ

            self.httppasswordmgrdb = httppasswordmgrdbproxy()
            self._blockedtimes = collections.defaultdict(int)

        # expose only the environment variables listed in
        # experimental.exportableenviron ('*' exposes everything)
        allowed = self.configlist('experimental', 'exportableenviron')
        if '*' in allowed:
            self._exportableenviron = self.environ
        else:
            self._exportableenviron = {}
            for k in allowed:
                if k in self.environ:
                    self._exportableenviron[k] = self.environ[k]
274 274
    @classmethod
    def load(cls):
        """Create a ui and load global and user configs"""
        u = cls()
        # we always trust global config files and environment variables
        for t, f in rcutil.rccomponents():
            if t == 'path':
                u.readconfig(f, trust=True)
            elif t == 'items':
                # f is an iterable of (section, name, value, source) tuples
                sections = set()
                for section, name, value, source in f:
                    # do not set u._ocfg
                    # XXX clean this up once immutable config object is a thing
                    u._tcfg.set(section, name, value, source)
                    u._ucfg.set(section, name, value, source)
                    sections.add(section)
                for section in sections:
                    u.fixconfig(section=section)
            else:
                raise error.ProgrammingError('unknown rctype: %s' % t)
        u._maybetweakdefaults()
        return u
297 297
    def _maybetweakdefaults(self):
        """Apply the ``tweakrc`` overlay when ui.tweakdefaults is enabled.

        Runs at most once per ui object and is a no-op in plain mode;
        only fills in values the user has not configured explicitly.
        """
        if not self.configbool('ui', 'tweakdefaults'):
            return
        if self._tweaked or self.plain('tweakdefaults'):
            return

        # Note: it is SUPER IMPORTANT that you set self._tweaked to
        # True *before* any calls to setconfig(), otherwise you'll get
        # infinite recursion between setconfig and this method.
        #
        # TODO: We should extract an inner method in setconfig() to
        # avoid this weirdness.
        self._tweaked = True
        tmpcfg = config.config()
        tmpcfg.parse('<tweakdefaults>', tweakrc)
        for section in tmpcfg:
            for name, value in tmpcfg.items(section):
                if not self.hasconfig(section, name):
                    self.setconfig(section, name, value, "<tweakdefaults>")
317 317
    def copy(self):
        """Return a copy of this ui object (see __init__'s ``src`` handling
        for what is shared vs. copied)."""
        return self.__class__(self)
320 320
    def resetstate(self):
        """Clear internal state that shouldn't persist across commands"""
        if self._progbar:
            self._progbar.resetstate() # reset last-print time of progress bar
        # drop any credentials accumulated by the previous command
        self.httppasswordmgrdb = httppasswordmgrdbproxy()
326 326
    @contextlib.contextmanager
    def timeblockedsection(self, key):
        """Context manager accumulating time spent blocked on ``key``.

        Elapsed milliseconds are added to self._blockedtimes under the
        '<key>_blocked' entry, even when the body raises.
        """
        # this is open-coded below - search for timeblockedsection to find them
        starttime = util.timer()
        try:
            yield
        finally:
            self._blockedtimes[key + '_blocked'] += \
                (util.timer() - starttime) * 1000
336 336
337 337 def formatter(self, topic, opts):
338 338 return formatter.formatter(self, self, topic, opts)
339 339
340 340 def _trusted(self, fp, f):
341 341 st = util.fstat(fp)
342 342 if util.isowner(st):
343 343 return True
344 344
345 345 tusers, tgroups = self._trustusers, self._trustgroups
346 346 if '*' in tusers or '*' in tgroups:
347 347 return True
348 348
349 349 user = util.username(st.st_uid)
350 350 group = util.groupname(st.st_gid)
351 351 if user in tusers or group in tgroups or user == util.username():
352 352 return True
353 353
354 354 if self._reportuntrusted:
355 355 self.warn(_('not trusting file %s from untrusted '
356 356 'user %s, group %s\n') % (f, user, group))
357 357 return False
358 358
    def readconfig(self, filename, root=None, trust=False,
                   sections=None, remap=None):
        """Read config file ``filename`` into this ui's config layers.

        A missing file is silently ignored unless specific ``sections``
        were requested.  Content always goes into the untrusted layer;
        it also goes into the trusted layer when ``trust`` is set,
        ``sections`` is given, or the file passes _trusted().  ``root``
        anchors relative [paths] entries (defaults to $HOME).  ``remap``
        is an optional section-name mapping handed to the parser.
        """
        try:
            fp = open(filename, u'rb')
        except IOError:
            if not sections: # ignore unless we were looking for something
                return
            raise

        cfg = config.config()
        trusted = sections or trust or self._trusted(fp, filename)

        try:
            cfg.read(filename, fp, sections=sections, remap=remap)
            fp.close()
        except error.ConfigError as inst:
            if trusted:
                raise
            # parse errors in untrusted files are reported, not fatal
            self.warn(_("ignored: %s\n") % stringutil.forcebytestr(inst))

        if self.plain():
            # plain mode: strip settings that would change output or
            # behavior in ways scripts should not observe
            for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
                      'logtemplate', 'statuscopies', 'style',
                      'traceback', 'verbose'):
                if k in cfg['ui']:
                    del cfg['ui'][k]
            for k, v in cfg.items('defaults'):
                del cfg['defaults'][k]
            for k, v in cfg.items('commands'):
                del cfg['commands'][k]
        # Don't remove aliases from the configuration if in the exceptionlist
        if self.plain('alias'):
            for k, v in cfg.items('alias'):
                del cfg['alias'][k]
        if self.plain('revsetalias'):
            for k, v in cfg.items('revsetalias'):
                del cfg['revsetalias'][k]
        if self.plain('templatealias'):
            for k, v in cfg.items('templatealias'):
                del cfg['templatealias'][k]

        if trusted:
            self._tcfg.update(cfg)
            self._tcfg.update(self._ocfg)
        self._ucfg.update(cfg)
        self._ucfg.update(self._ocfg)

        if root is None:
            root = os.path.expanduser('~')
        self.fixconfig(root=root)
409 409
    def fixconfig(self, root=None, section=None):
        """Recompute derived state after config changes.

        With ``section`` None all sections are processed, otherwise only
        the named one: [paths] entries are expanded and made absolute
        (relative to ``root`` or cwd), [ui] verbosity flags are re-read
        into attributes, and [trusted] users/groups are merged in.
        """
        if section in (None, 'paths'):
            # expand vars and ~
            # translate paths relative to root (or home) into absolute paths
            root = root or pycompat.getcwd()
            for c in self._tcfg, self._ucfg, self._ocfg:
                for n, p in c.items('paths'):
                    # Ignore sub-options.
                    if ':' in n:
                        continue
                    if not p:
                        continue
                    if '%%' in p:
                        s = self.configsource('paths', n) or 'none'
                        self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
                                  % (n, p, s))
                        p = p.replace('%%', '%')
                    p = util.expandpath(p)
                    if not util.hasscheme(p) and not os.path.isabs(p):
                        p = os.path.normpath(os.path.join(root, p))
                    c.set("paths", n, p)

        if section in (None, 'ui'):
            # update ui options
            self.debugflag = self.configbool('ui', 'debug')
            self.verbose = self.debugflag or self.configbool('ui', 'verbose')
            self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
            if self.verbose and self.quiet:
                # contradictory request; fall back to normal verbosity
                self.quiet = self.verbose = False
            self._reportuntrusted = self.debugflag or self.configbool("ui",
                "report_untrusted")
            self.tracebackflag = self.configbool('ui', 'traceback')
            self.logblockedtimes = self.configbool('ui', 'logblockedtimes')

        if section in (None, 'trusted'):
            # update trust information
            self._trustusers.update(self.configlist('trusted', 'users'))
            self._trustgroups.update(self.configlist('trusted', 'groups'))
448 448
449 449 def backupconfig(self, section, item):
450 450 return (self._ocfg.backup(section, item),
451 451 self._tcfg.backup(section, item),
452 452 self._ucfg.backup(section, item),)
453 453 def restoreconfig(self, data):
454 454 self._ocfg.restore(data[0])
455 455 self._tcfg.restore(data[1])
456 456 self._ucfg.restore(data[2])
457 457
458 458 def setconfig(self, section, name, value, source=''):
459 459 for cfg in (self._ocfg, self._tcfg, self._ucfg):
460 460 cfg.set(section, name, value, source)
461 461 self.fixconfig(section=section)
462 462 self._maybetweakdefaults()
463 463
    def _data(self, untrusted):
        # Pick the config layer to read from: untrusted (_ucfg) or trusted
        # (_tcfg).  Deliberately kept as and/or rather than a conditional
        # expression: a falsy (e.g. empty) _ucfg would also fall back to
        # _tcfg here.
        return untrusted and self._ucfg or self._tcfg
466 466
467 467 def configsource(self, section, name, untrusted=False):
468 468 return self._data(untrusted).source(section, name)
469 469
470 470 def config(self, section, name, default=_unset, untrusted=False):
471 471 """return the plain string version of a config"""
472 472 value = self._config(section, name, default=default,
473 473 untrusted=untrusted)
474 474 if value is _unset:
475 475 return None
476 476 return value
477 477
    def _config(self, section, name, default=_unset, untrusted=False):
        """Look up a raw config value, handling aliases and devel warnings.

        Returns ``_unset`` when the item is not set and no usable default
        exists.  Consults the registered config item (if any) for its
        aliases and declared default, and emits develwarns for
        unregistered items, dynamicdefault items queried without an
        explicit default, and mismatched caller-supplied defaults.
        """
        value = itemdefault = default
        item = self._knownconfig.get(section, {}).get(name)
        alternates = [(section, name)]

        if item is not None:
            alternates.extend(item.alias)
            if callable(item.default):
                itemdefault = item.default()
            else:
                itemdefault = item.default
        else:
            msg = ("accessing unregistered config item: '%s.%s'")
            msg %= (section, name)
            self.develwarn(msg, 2, 'warn-config-unknown')

        if default is _unset:
            if item is None:
                value = default
            elif item.default is configitems.dynamicdefault:
                value = None
                msg = "config item requires an explicit default value: '%s.%s'"
                msg %= (section, name)
                self.develwarn(msg, 2, 'warn-config-default')
            else:
                value = itemdefault
        elif (item is not None
              and item.default is not configitems.dynamicdefault
              and default != itemdefault):
            msg = ("specifying a mismatched default value for a registered "
                   "config item: '%s.%s' '%s'")
            msg %= (section, name, pycompat.bytestr(default))
            self.develwarn(msg, 2, 'warn-config-default')

        # first (section, name) alternate that is actually set wins
        for s, n in alternates:
            candidate = self._data(untrusted).get(s, n, None)
            if candidate is not None:
                value = candidate
                section = s
                name = n
                break

        if self.debugflag and not untrusted and self._reportuntrusted:
            for s, n in alternates:
                uvalue = self._ucfg.get(s, n)
                if uvalue is not None and uvalue != value:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (s, n, uvalue))
        return value
527 527
528 528 def configsuboptions(self, section, name, default=_unset, untrusted=False):
529 529 """Get a config option and all sub-options.
530 530
531 531 Some config options have sub-options that are declared with the
532 532 format "key:opt = value". This method is used to return the main
533 533 option and all its declared sub-options.
534 534
535 535 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
536 536 is a dict of defined sub-options where keys and values are strings.
537 537 """
538 538 main = self.config(section, name, default, untrusted=untrusted)
539 539 data = self._data(untrusted)
540 540 sub = {}
541 541 prefix = '%s:' % name
542 542 for k, v in data.items(section):
543 543 if k.startswith(prefix):
544 544 sub[k[len(prefix):]] = v
545 545
546 546 if self.debugflag and not untrusted and self._reportuntrusted:
547 547 for k, v in sub.items():
548 548 uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
549 549 if uvalue is not None and uvalue != v:
550 550 self.debug('ignoring untrusted configuration option '
551 551 '%s:%s.%s = %s\n' % (section, name, k, uvalue))
552 552
553 553 return main, sub
554 554
    def configpath(self, section, name, default=_unset, untrusted=False):
        'get a path config item, expanded relative to repo root or config file'
        v = self.config(section, name, default, untrusted)
        if v is None:
            return None
        # NOTE(review): because this is 'or', almost every value (including
        # absolute paths, whose join below is a no-op) takes this branch;
        # it looks like it may have been intended as 'and'.  Confirm callers
        # before changing -- behavior is longstanding.
        if not os.path.isabs(v) or "://" not in v:
            src = self.configsource(section, name, untrusted)
            if ':' in src:
                # src is "file:line"; resolve relative to that config file
                base = os.path.dirname(src.rsplit(':')[0])
                v = os.path.join(base, os.path.expanduser(v))
        return v
566 566
    def configbool(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a boolean

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'true', b'yes')
        >>> u.configbool(s, b'true')
        True
        >>> u.setconfig(s, b'false', b'no')
        >>> u.configbool(s, b'false')
        False
        >>> u.configbool(s, b'unknown')
        False
        >>> u.configbool(s, b'unknown', True)
        True
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbool(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a boolean ('somevalue')
        """

        v = self._config(section, name, default, untrusted=untrusted)
        if v is None:
            return v
        if v is _unset:
            # not configured: fall back to the caller's default
            # (False when no default was given)
            if default is _unset:
                return False
            return default
        if isinstance(v, bool):
            # registered defaults may already be bools; pass through
            return v
        b = stringutil.parsebool(v)
        if b is None:
            raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
                                    % (section, name, v))
        return b
602 602
    def configwith(self, convert, section, name, default=_unset,
                   desc=None, untrusted=False):
        """parse a configuration element with a conversion function

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'float1', b'42')
        >>> u.configwith(float, s, b'float1')
        42.0
        >>> u.setconfig(s, b'float2', b'-4.25')
        >>> u.configwith(float, s, b'float2')
        -4.25
        >>> u.configwith(float, s, b'unknown', 7)
        7.0
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configwith(float, s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid float ('somevalue')
        >>> u.configwith(float, s, b'invalid', desc=b'womble')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid womble ('somevalue')
        """

        v = self.config(section, name, default, untrusted)
        if v is None:
            return v # do not attempt to convert None
        try:
            return convert(v)
        except (ValueError, error.ParseError):
            # normalize conversion failures to ConfigError, naming the
            # expected type (desc defaults to the converter's name)
            if desc is None:
                desc = pycompat.sysbytes(convert.__name__)
            raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
                                    % (section, name, desc, v))
637 637
    def configint(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as an integer

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'int1', b'42')
        >>> u.configint(s, b'int1')
        42
        >>> u.setconfig(s, b'int2', b'-42')
        >>> u.configint(s, b'int2')
        -42
        >>> u.configint(s, b'unknown', 7)
        7
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configint(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid integer ('somevalue')
        """

        # int() does the parsing; configwith handles errors/defaults
        return self.configwith(int, section, name, default, 'integer',
                               untrusted)
659 659
    def configbytes(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a quantity in bytes

        Units can be specified as b (bytes), k or kb (kilobytes), m or
        mb (megabytes), g or gb (gigabytes).

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'val1', b'42')
        >>> u.configbytes(s, b'val1')
        42
        >>> u.setconfig(s, b'val2', b'42.5 kb')
        >>> u.configbytes(s, b'val2')
        43520
        >>> u.configbytes(s, b'unknown', b'7 MB')
        7340032
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbytes(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a byte quantity ('somevalue')
        """

        value = self._config(section, name, default, untrusted)
        if value is _unset:
            if default is _unset:
                default = 0
            value = default
        if not isinstance(value, bytes):
            # non-string defaults (e.g. ints) are returned as-is
            return value
        try:
            return util.sizetoint(value)
        except error.ParseError:
            raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
                                    % (section, name, value))
694 694
    def configlist(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
        >>> u.configlist(s, b'list1')
        ['this', 'is', 'a small', 'test']
        >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
        >>> u.configlist(s, b'list2')
        ['this', 'is', 'a small', 'test']
        """
        # default is not always a list
        v = self.configwith(config.parselist, section, name, default,
                            'list', untrusted)
        if isinstance(v, bytes):
            # a string default is parsed like a config value
            return config.parselist(v)
        elif v is None:
            return []
        return v
715 715
    def configdate(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a tuple of ints

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'date', b'0 0')
        >>> u.configdate(s, b'date')
        (0, 0)
        """
        # only run the date parser when something is actually configured;
        # otherwise return the (possibly non-date) default untouched
        if self.config(section, name, default, untrusted):
            return self.configwith(dateutil.parsedate, section, name, default,
                                   'date', untrusted)
        if default is _unset:
            return None
        return default
730 730
731 731 def hasconfig(self, section, name, untrusted=False):
732 732 return self._data(untrusted).hasitem(section, name)
733 733
734 734 def has_section(self, section, untrusted=False):
735 735 '''tell whether section exists in config.'''
736 736 return section in self._data(untrusted)
737 737
738 738 def configitems(self, section, untrusted=False, ignoresub=False):
739 739 items = self._data(untrusted).items(section)
740 740 if ignoresub:
741 741 items = [i for i in items if ':' not in i[0]]
742 742 if self.debugflag and not untrusted and self._reportuntrusted:
743 743 for k, v in self._ucfg.items(section):
744 744 if self._tcfg.get(section, k) != v:
745 745 self.debug("ignoring untrusted configuration option "
746 746 "%s.%s = %s\n" % (section, k, v))
747 747 return items
748 748
749 749 def walkconfig(self, untrusted=False):
750 750 cfg = self._data(untrusted)
751 751 for section in cfg.sections():
752 752 for name, value in self.configitems(section, untrusted):
753 753 yield section, name, value
754 754
    def plain(self, feature=None):
        '''is plain mode active?

        Plain mode means that all configuration variables which affect
        the behavior and output of Mercurial should be
        ignored. Additionally, the output should be stable,
        reproducible and suitable for use in scripts or applications.

        The only way to trigger plain mode is by setting either the
        `HGPLAIN' or `HGPLAINEXCEPT' environment variables.

        The return value can either be
        - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
        - False if feature is disabled by default and not included in HGPLAIN
        - True otherwise
        '''
        if ('HGPLAIN' not in encoding.environ and
            'HGPLAINEXCEPT' not in encoding.environ):
            return False
        exceptions = encoding.environ.get('HGPLAINEXCEPT',
                '').strip().split(',')
        # TODO: add support for HGPLAIN=+feature,-feature syntax
        # strictflags is opt-in: treated as an exception unless HGPLAIN
        # explicitly contains '+strictflags'
        if '+strictflags' not in encoding.environ.get('HGPLAIN', '').split(','):
            exceptions.append('strictflags')
        if feature and exceptions:
            return feature not in exceptions
        return True
782 782
    def username(self, acceptempty=False):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        If not found and acceptempty is True, returns None.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
        If no username could be found, raise an Abort error.
        """
        user = encoding.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", "username")
            if user is not None:
                # support e.g. username = $OTHERVAR in the config
                user = os.path.expandvars(user)
        if user is None:
            user = encoding.environ.get("EMAIL")
        if user is None and acceptempty:
            return user
        if user is None and self.configbool("ui", "askusername"):
            user = self.prompt(_("enter a commit username:"), default=None)
        if user is None and not self.interactive():
            try:
                user = '%s@%s' % (procutil.getuser(),
                                  encoding.strtolocal(socket.getfqdn()))
                self.warn(_("no username found, using '%s' instead\n") % user)
            except KeyError:
                # user lookup can fail when the uid has no account entry
                pass
        if not user:
            raise error.Abort(_('no username supplied'),
                              hint=_("use 'hg config --edit' "
                                     'to set your username'))
        if "\n" in user:
            raise error.Abort(_("username %r contains a newline\n")
                              % pycompat.bytestr(user))
        return user
819 819
820 820 def shortuser(self, user):
821 821 """Return a short representation of a user name or email address."""
822 822 if not self.verbose:
823 823 user = stringutil.shortuser(user)
824 824 return user
825 825
826 826 def expandpath(self, loc, default=None):
827 827 """Return repository location relative to cwd or from [paths]"""
828 828 try:
829 829 p = self.paths.getpath(loc)
830 830 if p:
831 831 return p.rawloc
832 832 except error.RepoError:
833 833 pass
834 834
835 835 if default:
836 836 try:
837 837 p = self.paths.getpath(default)
838 838 if p:
839 839 return p.rawloc
840 840 except error.RepoError:
841 841 pass
842 842
843 843 return loc
844 844
    @util.propertycache
    def paths(self):
        # lazily-built, cached collection of configured [paths] entries
        return paths(self)
848 848
    def pushbuffer(self, error=False, subproc=False, labeled=False):
        """install a buffer to capture standard output of the ui object

        If error is True, the error output will be captured too.

        If subproc is True, output from subprocesses (typically hooks) will be
        captured too.

        If labeled is True, any labels associated with buffered
        output will be handled. By default, this has no effect
        on the output returned, but extensions and GUI tools may
        handle this argument and returned styled output. If output
        is being buffered so it can be captured and parsed or
        processed, labeled should not be set to True.
        """
        self._buffers.append([])
        self._bufferstates.append((error, subproc, labeled))
        # mirror the top buffer's labeled flag for fast access in write()
        self._bufferapplylabels = labeled
867 867
    def popbuffer(self):
        '''pop the last buffer and return the buffered output'''
        self._bufferstates.pop()
        if self._bufferstates:
            # restore the labeled flag of the enclosing buffer
            self._bufferapplylabels = self._bufferstates[-1][2]
        else:
            self._bufferapplylabels = None

        return "".join(self._buffers.pop())
877 877
878 878 def canwritewithoutlabels(self):
879 879 '''check if write skips the label'''
880 880 if self._buffers and not self._bufferapplylabels:
881 881 return True
882 882 return self._colormode is None
883 883
884 884 def canbatchlabeledwrites(self):
885 885 '''check if write calls with labels are batchable'''
886 886 # Windows color printing is special, see ``write``.
887 887 return self._colormode != 'win32'
888 888
    def write(self, *args, **opts):
        '''write args to output

        By default, this method simply writes to the buffer or stdout.
        Color mode can be set on the UI class to have the output decorated
        with color modifier before being written to stdout.

        The color used is controlled by an optional keyword argument, "label".
        This should be a string containing label names separated by space.
        Label names take the form of "topic.type". For example, ui.debug()
        issues a label of "ui.debug".

        When labeling output for a specific command, a label of
        "cmdname.type" is recommended. For example, status issues
        a label of "status.modified" for modified files.
        '''
        if self._buffers:
            # buffered: append to the innermost buffer, labeling on demand
            if self._bufferapplylabels:
                label = opts.get(r'label', '')
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
        else:
            self._writenobuf(*args, **opts)
913 913
    def _writenobuf(self, *args, **opts):
        # write to the real output stream (no buffering), applying color
        if self._colormode == 'win32':
            # windows color printing is its own can of crab, defer to
            # the color module and that is it.
            color.win32print(self, self._write, *args, **opts)
        else:
            msgs = args
            if self._colormode is not None:
                label = opts.get(r'label', '')
                msgs = [self.label(a, label) for a in args]
            self._write(*msgs, **opts)
925 925
    def _write(self, *msgs, **opts):
        # lowest-level stdout write; clears pending progress output first
        self._progclear()
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            self.fout.write(''.join(msgs))
        except IOError as err:
            raise error.StdioError(err)
        finally:
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000
937 937
    def write_err(self, *args, **opts):
        # like write(), but targets stderr; honors error-capturing buffers
        self._progclear()
        if self._bufferstates and self._bufferstates[-1][0]:
            # innermost buffer was pushed with error=True: capture instead
            self.write(*args, **opts)
        elif self._colormode == 'win32':
            # windows color printing is its own can of crab, defer to
            # the color module and that is it.
            color.win32print(self, self._write_err, *args, **opts)
        else:
            msgs = args
            if self._colormode is not None:
                label = opts.get(r'label', '')
                msgs = [self.label(a, label) for a in args]
            self._write_err(*msgs, **opts)
952 952
    def _write_err(self, *msgs, **opts):
        # lowest-level stderr write; flushes stdout first to keep ordering
        try:
            with self.timeblockedsection('stdio'):
                if not getattr(self.fout, 'closed', False):
                    self.fout.flush()
                for a in msgs:
                    self.ferr.write(a)
                # stderr may be buffered under win32 when redirected to files,
                # including stdout.
                if not getattr(self.ferr, 'closed', False):
                    self.ferr.flush()
        except IOError as inst:
            # broken-pipe style errors are tolerated; others are fatal
            if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                raise error.StdioError(inst)
967 967
    def flush(self):
        # flush both streams; stderr is attempted even when stdout fails
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            try:
                self.fout.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
            finally:
                try:
                    self.ferr.flush()
                except IOError as err:
                    if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                        raise error.StdioError(err)
        finally:
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000
986 986
987 987 def _isatty(self, fh):
988 988 if self.configbool('ui', 'nontty'):
989 989 return False
990 990 return procutil.isatty(fh)
991 991
    def disablepager(self):
        # permanently disable the pager for this ui object (see pager())
        self._disablepager = True
994 994
    def pager(self, command):
        """Start a pager for subsequent command output.

        Commands which produce a long stream of output should call
        this function to activate the user's preferred pagination
        mechanism (which may be no pager). Calling this function
        precludes any future use of interactive functionality, such as
        prompting the user or activating curses.

        When a pager actually starts, ui.formatted is pinned to its
        current value, ui.interactive is forced off, and color may be
        reconfigured for the pager mode.

        Args:
            command: The full, non-aliased name of the command. That is, "log"
                     not "history, "summary" not "summ", etc.
        """
        if (self._disablepager
            or self.pageractive):
            # how pager should do is already determined
            return

        if not command.startswith('internal-always-') and (
            # explicit --pager=on (= 'internal-always-' prefix) should
            # take precedence over disabling factors below
            command in self.configlist('pager', 'ignore')
            or not self.configbool('ui', 'paginate')
            or not self.configbool('pager', 'attend-' + command, True)
            # TODO: if we want to allow HGPLAINEXCEPT=pager,
            # formatted() will need some adjustment.
            or not self.formatted()
            or self.plain()
            or self._buffers
            # TODO: expose debugger-enabled on the UI object
            or '--debugger' in pycompat.sysargv):
            # We only want to paginate if the ui appears to be
            # interactive, the user didn't say HGPLAIN or
            # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
            return

        pagercmd = self.config('pager', 'pager', rcutil.fallbackpager)
        if not pagercmd:
            return

        # pass default pager environment entries unless already set
        pagerenv = {}
        for name, value in rcutil.defaultpagerenv().items():
            if name not in encoding.environ:
                pagerenv[name] = value

        self.debug('starting pager for command %r\n' % command)
        self.flush()

        wasformatted = self.formatted()
        if util.safehasattr(signal, "SIGPIPE"):
            signal.signal(signal.SIGPIPE, _catchterm)
        if self._runpager(pagercmd, pagerenv):
            self.pageractive = True
            # Preserve the formatted-ness of the UI. This is important
            # because we mess with stdout, which might confuse
            # auto-detection of things being formatted.
            self.setconfig('ui', 'formatted', wasformatted, 'pager')
            self.setconfig('ui', 'interactive', False, 'pager')

            # If pagermode differs from color.mode, reconfigure color now that
            # pageractive is set.
            cm = self._colormode
            if cm != self.config('color', 'pagermode', cm):
                color.setup(self)
        else:
            # If the pager can't be spawned in dispatch when --pager=on is
            # given, don't try again when the command runs, to avoid a duplicate
            # warning about a missing pager command.
            self.disablepager()
1064 1064
    def _runpager(self, command, env=None):
        """Actually start the pager and set up file descriptors.

        This is separate in part so that extensions (like chg) can
        override how a pager is invoked.

        Returns True when a pager process was spawned (stdout, and stderr
        when it is a tty, are then redirected into it), False otherwise.
        """
        if command == 'cat':
            # Save ourselves some work.
            return False
        # If the command doesn't contain any of these characters, we
        # assume it's a binary and exec it directly. This means for
        # simple pager command configurations, we can degrade
        # gracefully and tell the user about their broken pager.
        shell = any(c in command for c in "|&;<>()$`\\\"' \t\n*?[#~=%")

        if pycompat.iswindows and not shell:
            # Window's built-in `more` cannot be invoked with shell=False, but
            # its `more.com` can. Hide this implementation detail from the
            # user so we can also get sane bad PAGER behavior. MSYS has
            # `more.exe`, so do a cmd.exe style resolution of the executable to
            # determine which one to use.
            fullcmd = procutil.findexe(command)
            if not fullcmd:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False

            command = fullcmd

        try:
            pager = subprocess.Popen(
                command, shell=shell, bufsize=-1,
                close_fds=procutil.closefds, stdin=subprocess.PIPE,
                stdout=procutil.stdout, stderr=procutil.stderr,
                env=procutil.shellenviron(env))
        except OSError as e:
            if e.errno == errno.ENOENT and not shell:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False
            raise

        # back up original file descriptors
        stdoutfd = os.dup(procutil.stdout.fileno())
        stderrfd = os.dup(procutil.stderr.fileno())

        os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
        if self._isatty(procutil.stderr):
            # only route stderr through the pager when it is a terminal
            os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())

        @self.atexit
        def killpager():
            if util.safehasattr(signal, "SIGINT"):
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            # restore original fds, closing pager.stdin copies in the process
            os.dup2(stdoutfd, procutil.stdout.fileno())
            os.dup2(stderrfd, procutil.stderr.fileno())
            pager.stdin.close()
            pager.wait()

        return True
1126 1126
    @property
    def _exithandlers(self):
        # module-global handler list shared by all ui instances; entries
        # are registered via atexit() and run after the current request
        return _reqexithandlers
1130 1130
1131 1131 def atexit(self, func, *args, **kwargs):
1132 1132 '''register a function to run after dispatching a request
1133 1133
1134 1134 Handlers do not stay registered across request boundaries.'''
1135 1135 self._exithandlers.append((func, args, kwargs))
1136 1136 return func
1137 1137
    def interface(self, feature):
        """what interface to use for interactive console features?

        The interface is controlled by the value of `ui.interface` but also by
        the value of feature-specific configuration. For example:

        ui.interface.histedit = text
        ui.interface.chunkselector = curses

        Here the features are "histedit" and "chunkselector".

        The configuration above means that the default interfaces for commands
        is curses, the interface for histedit is text and the interface for
        selecting chunk is crecord (the best curses interface available).

        Consider the following example:
        ui.interface = curses
        ui.interface.histedit = text

        Then histedit will use the text interface and chunkselector will use
        the default curses interface (crecord at the moment).
        """
        alldefaults = frozenset(["text", "curses"])

        featureinterfaces = {
            "chunkselector": [
                "text",
                "curses",
            ]
        }

        # Feature-specific interface
        if feature not in featureinterfaces.keys():
            # Programming error, not user error
            raise ValueError("Unknown feature requested %s" % feature)

        availableinterfaces = frozenset(featureinterfaces[feature])
        if alldefaults > availableinterfaces:
            # Programming error, not user error. We need a use case to
            # define the right thing to do here.
            raise ValueError(
                "Feature %s does not handle all default interfaces" %
                feature)

        if self.plain():
            # plain mode forces the scriptable text interface
            return "text"

        # Default interface for all the features
        defaultinterface = "text"
        i = self.config("ui", "interface")
        if i in alldefaults:
            defaultinterface = i

        choseninterface = defaultinterface
        f = self.config("ui", "interface.%s" % feature)
        if f in availableinterfaces:
            choseninterface = f

        # warn about configured but invalid interface names
        if i is not None and defaultinterface != i:
            if f is not None:
                self.warn(_("invalid value for ui.interface: %s\n") %
                          (i,))
            else:
                self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
                          (i, choseninterface))
        if f is not None and choseninterface != f:
            self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
                      (feature, f, choseninterface))

        return choseninterface
1208 1208
1209 1209 def interactive(self):
1210 1210 '''is interactive input allowed?
1211 1211
1212 1212 An interactive session is a session where input can be reasonably read
1213 1213 from `sys.stdin'. If this function returns false, any attempt to read
1214 1214 from stdin should fail with an error, unless a sensible default has been
1215 1215 specified.
1216 1216
1217 1217 Interactiveness is triggered by the value of the `ui.interactive'
1218 1218 configuration variable or - if it is unset - when `sys.stdin' points
1219 1219 to a terminal device.
1220 1220
1221 1221 This function refers to input only; for output, see `ui.formatted()'.
1222 1222 '''
1223 1223 i = self.configbool("ui", "interactive")
1224 1224 if i is None:
1225 1225 # some environments replace stdin without implementing isatty
1226 1226 # usually those are non-interactive
1227 1227 return self._isatty(self.fin)
1228 1228
1229 1229 return i
1230 1230
1231 1231 def termwidth(self):
1232 1232 '''how wide is the terminal in columns?
1233 1233 '''
1234 1234 if 'COLUMNS' in encoding.environ:
1235 1235 try:
1236 1236 return int(encoding.environ['COLUMNS'])
1237 1237 except ValueError:
1238 1238 pass
1239 1239 return scmutil.termsize(self)[0]
1240 1240
    def formatted(self):
        '''should formatted output be used?

        It is often desirable to format the output to suite the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is often not desirable when piping output into other
        utilities, e.g. `grep'.

        Formatted output is triggered by the value of the `ui.formatted'
        configuration variable or - if it is unset - when `sys.stdout' points
        to a terminal device. Please note that `ui.formatted' should be
        considered an implementation detail; it is not intended for use outside
        Mercurial or its extensions.

        This function refers to output only; for input, see `ui.interactive()'.
        This function always returns false when in plain mode, see `ui.plain()'.
        '''
        # Plain mode unconditionally disables formatting.
        if self.plain():
            return False

        i = self.configbool("ui", "formatted")
        if i is None:
            # some environments replace stdout without implementing isatty
            # usually those are non-interactive
            return self._isatty(self.fout)

        return i
1268 1268
    def _readline(self):
        """Read one line of user input from self.fin.

        Returns the line with the OS line separator stripped; raises
        EOFError when the stream is exhausted.
        """
        # Replacing stdin/stdout temporarily is a hard problem on Python 3
        # because they have to be text streams with *no buffering*. Instead,
        # we use rawinput() only if call_readline() will be invoked by
        # PyOS_Readline(), so no I/O will be made at Python layer.
        usereadline = (self._isatty(self.fin) and self._isatty(self.fout)
                       and procutil.isstdin(self.fin)
                       and procutil.isstdout(self.fout))
        if usereadline:
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                usereadline = False

        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        with self.timeblockedsection('stdio'):
            if usereadline:
                line = encoding.strtolocal(pycompat.rawinput(r' '))
                # When stdin is in binary mode on Windows, it can cause
                # raw_input() to emit an extra trailing carriage return
                if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
                    line = line[:-1]
            else:
                # No readline: emit the one-space prompt manually and read
                # directly from the input stream.
                self.fout.write(b' ')
                self.fout.flush()
                line = self.fin.readline()
                if not line:
                    raise EOFError
                line = line.rstrip(pycompat.oslinesep)

        return line
1306 1306
    def prompt(self, msg, default="y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.

        An empty response also yields the default. The response is echoed
        back when ui.promptecho is set (useful for test output).
        Raises error.ResponseExpected on EOF.
        """
        if not self.interactive():
            # Non-interactive: show what would have been asked, answer with
            # the default.
            self.write(msg, ' ', default or '', "\n")
            return default
        # Write the prompt unbuffered so it is visible before blocking on
        # input.
        self._writenobuf(msg, label='ui.prompt')
        self.flush()
        try:
            r = self._readline()
            if not r:
                r = default
            if self.configbool('ui', 'promptecho'):
                self.write(r, "\n")
            return r
        except EOFError:
            raise error.ResponseExpected()
1325 1325
    @staticmethod
    def extractchoices(prompt):
        """Extract prompt message and list of choices from specified prompt.

        This returns tuple "(message, choices)", and "choices" is the
        list of tuple "(response character, text without &)".

        >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
        ('awake? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
        ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
        ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
        """

        # Sadly, the prompt string may have been built with a filename
        # containing "$$" so let's try to find the first valid-looking
        # prompt to start parsing. Sadly, we also can't rely on
        # choices containing spaces, ASCII, or basically anything
        # except an ampersand followed by a character.
        # NOTE(review): the regex is assumed to always match (callers must
        # supply at least one "$$ &x" choice); a prompt without one would
        # make m None and raise AttributeError here — confirm with callers.
        m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
        msg = m.group(1)
        choices = [p.strip(' ') for p in m.group(2).split('$$')]
        def choicetuple(s):
            # The response key is the single character after '&', lowered;
            # the display text is the choice with that one '&' removed.
            ampidx = s.index('&')
            return s[ampidx + 1:ampidx + 2].lower(), s.replace('&', '', 1)
        return (msg, [choicetuple(s) for s in choices])
1353 1353
1354 1354 def promptchoice(self, prompt, default=0):
1355 1355 """Prompt user with a message, read response, and ensure it matches
1356 1356 one of the provided choices. The prompt is formatted as follows:
1357 1357
1358 1358 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1359 1359
1360 1360 The index of the choice is returned. Responses are case
1361 1361 insensitive. If ui is not interactive, the default is
1362 1362 returned.
1363 1363 """
1364 1364
1365 1365 msg, choices = self.extractchoices(prompt)
1366 1366 resps = [r for r, t in choices]
1367 1367 while True:
1368 1368 r = self.prompt(msg, resps[default])
1369 1369 if r.lower() in resps:
1370 1370 return resps.index(r.lower())
1371 1371 self.write(_("unrecognized response\n"))
1372 1372
    def getpass(self, prompt=None, default=None):
        """Prompt for a password without echoing it.

        Returns ``default`` when the ui is not interactive. Raises
        error.ResponseExpected on EOF.
        """
        if not self.interactive():
            return default
        try:
            self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
            # disable getpass() only if explicitly specified. it's still valid
            # to interact with tty even if fin is not a tty.
            with self.timeblockedsection('stdio'):
                if self.configbool('ui', 'nontty'):
                    # Read from fin directly instead of the controlling tty.
                    l = self.fin.readline()
                    if not l:
                        raise EOFError
                    return l.rstrip('\n')
                else:
                    # Empty prompt: the label was already written above.
                    return getpass.getpass('')
        except EOFError:
            raise error.ResponseExpected()
1390 1390 def status(self, *msg, **opts):
1391 1391 '''write status message to output (if ui.quiet is False)
1392 1392
1393 1393 This adds an output label of "ui.status".
1394 1394 '''
1395 1395 if not self.quiet:
1396 1396 opts[r'label'] = opts.get(r'label', '') + ' ui.status'
1397 1397 self.write(*msg, **opts)
1398 1398 def warn(self, *msg, **opts):
1399 1399 '''write warning message to output (stderr)
1400 1400
1401 1401 This adds an output label of "ui.warning".
1402 1402 '''
1403 1403 opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
1404 1404 self.write_err(*msg, **opts)
1405 1405 def note(self, *msg, **opts):
1406 1406 '''write note to output (if ui.verbose is True)
1407 1407
1408 1408 This adds an output label of "ui.note".
1409 1409 '''
1410 1410 if self.verbose:
1411 1411 opts[r'label'] = opts.get(r'label', '') + ' ui.note'
1412 1412 self.write(*msg, **opts)
1413 1413 def debug(self, *msg, **opts):
1414 1414 '''write debug message to output (if ui.debugflag is True)
1415 1415
1416 1416 This adds an output label of "ui.debug".
1417 1417 '''
1418 1418 if self.debugflag:
1419 1419 opts[r'label'] = opts.get(r'label', '') + ' ui.debug'
1420 1420 self.write(*msg, **opts)
1421 1421
    def edit(self, text, user, extra=None, editform=None, pending=None,
             repopath=None, action=None):
        """Launch the user's editor on *text* and return the edited result.

        *user* is exported as HGUSER; *editform* and *pending* become
        HGEDITFORM and HG_PENDING respectively. *extra* may override the
        temp-file 'prefix'/'suffix' and supply source revisions exported as
        HGREVISION. *action* selects the file suffix (e.g. '.diff').
        Raises error.Abort if the editor exits non-zero.
        """
        if action is None:
            self.develwarn('action is None but will soon be a required '
                           'parameter to ui.edit()')
        extra_defaults = {
            'prefix': 'editor',
            'suffix': '.txt',
        }
        if extra is not None:
            if extra.get('suffix') is not None:
                self.develwarn('extra.suffix is not None but will soon be '
                               'ignored by ui.edit()')
            extra_defaults.update(extra)
        extra = extra_defaults

        # Pick a file suffix so the editor can apply syntax highlighting.
        if action == 'diff':
            suffix = '.diff'
        elif action:
            suffix = '.%s.hg.txt' % action
        else:
            suffix = extra['suffix']

        # Optionally keep the temp file inside the repository.
        rdir = None
        if self.configbool('experimental', 'editortmpinhg'):
            rdir = repopath
        (fd, name) = pycompat.mkstemp(prefix='hg-' + extra['prefix'] + '-',
                                      suffix=suffix,
                                      dir=rdir)
        try:
            # Write the seed text with native line endings for the editor.
            f = os.fdopen(fd, r'wb')
            f.write(util.tonativeeol(text))
            f.close()

            environ = {'HGUSER': user}
            if 'transplant_source' in extra:
                environ.update({'HGREVISION': hex(extra['transplant_source'])})
            # First matching source key wins for HGREVISION.
            for label in ('intermediate-source', 'source', 'rebase_source'):
                if label in extra:
                    environ.update({'HGREVISION': extra[label]})
                    break
            if editform:
                environ.update({'HGEDITFORM': editform})
            if pending:
                environ.update({'HG_PENDING': pending})

            editor = self.geteditor()

            self.system("%s \"%s\"" % (editor, name),
                        environ=environ,
                        onerr=error.Abort, errprefix=_("edit failed"),
                        blockedtag='editor')

            # Read the result back, normalizing line endings again.
            f = open(name, r'rb')
            t = util.fromnativeeol(f.read())
            f.close()
        finally:
            # Always remove the temp file, even if the editor failed.
            os.unlink(name)

        return t
1482 1482
    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
               blockedtag=None):
        '''execute shell command with appropriate output stream. command
        output will be redirected if fout is not stdout.

        if command fails and onerr is None, return status, else raise onerr
        object as exception.
        '''
        if blockedtag is None:
            # Long cmds tend to be because of an absolute path on cmd. Keep
            # the tail end instead
            cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
            blockedtag = 'unknown_system_' + cmdsuffix
        # If output is currently being buffered, route the command's output
        # through self so it lands in the buffer instead of the real stream.
        out = self.fout
        if any(s[1] for s in self._bufferstates):
            out = self
        # Account the child process's runtime as blocked time.
        with self.timeblockedsection(blockedtag):
            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                procutil.explainexit(rc))
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            raise onerr(errmsg)
        return rc
1508 1508
    def _runsystem(self, cmd, environ, cwd, out):
        """actually execute the given shell command (can be overridden by
        extensions like chg)

        Returns the command's exit status; ``out`` receives its output.
        """
        return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
1513 1513
    def traceback(self, exc=None, force=False):
        '''print exception traceback if traceback printing enabled or forced.
        only to call in exception handler. returns true if traceback
        printed.'''
        if self.tracebackflag or force:
            if exc is None:
                # Default to the exception currently being handled.
                exc = sys.exc_info()
            # 'cause' is an attribute set on the exception object elsewhere
            # (pre-PEP 3134 style chaining) — presumably (type, value, tb);
            # confirm where it is assigned.
            cause = getattr(exc[1], 'cause', None)

            if cause is not None:
                causetb = traceback.format_tb(cause[2])
                exctb = traceback.format_tb(exc[2])
                exconly = traceback.format_exception_only(cause[0], cause[1])

                # exclude frame where 'exc' was chained and rethrown from exctb
                self.write_err('Traceback (most recent call last):\n',
                               ''.join(exctb[:-1]),
                               ''.join(causetb),
                               ''.join(exconly))
            else:
                # No chained cause: print the standard formatted traceback.
                output = traceback.format_exception(exc[0], exc[1], exc[2])
                self.write_err(encoding.strtolocal(r''.join(output)))
        return self.tracebackflag or force
1537 1537
1538 1538 def geteditor(self):
1539 1539 '''return editor to use'''
1540 1540 if pycompat.sysplatform == 'plan9':
1541 1541 # vi is the MIPS instruction simulator on Plan 9. We
1542 1542 # instead default to E to plumb commit messages to
1543 1543 # avoid confusion.
1544 1544 editor = 'E'
1545 1545 else:
1546 1546 editor = 'vi'
1547 1547 return (encoding.environ.get("HGEDITOR") or
1548 1548 self.config("ui", "editor", editor))
1549 1549
    @util.propertycache
    def _progbar(self):
        """setup the progbar singleton to the ui object

        Cached per instance (util.propertycache). Returns None when progress
        output is suppressed (quiet/debug, progress.disable, or the progress
        module says not to print); otherwise the shared progbar singleton.
        """
        if (self.quiet or self.debugflag
            or self.configbool('progress', 'disable')
            or not progress.shouldprint(self)):
            return None
        return getprogbar(self)
1558 1558
1559 1559 def _progclear(self):
1560 1560 """clear progress bar output if any. use it before any output"""
1561 1561 if not haveprogbar(): # nothing loaded yet
1562 1562 return
1563 1563 if self._progbar is not None and self._progbar.printed:
1564 1564 self._progbar.clear()
1565 1565
    def progress(self, topic, pos, item="", unit="", total=None):
        '''show a progress message

        By default a textual progress bar will be displayed if an operation
        takes too long. 'topic' is the current operation, 'item' is a
        non-numeric marker of the current position (i.e. the currently
        in-process file), 'pos' is the current numeric position (i.e.
        revision, bytes, etc.), unit is a corresponding unit label,
        and total is the highest expected pos.

        Multiple nested topics may be active at a time.

        All topics should be marked closed by setting pos to None at
        termination.
        '''
        # Forward to the visual progress bar when one is active.
        if self._progbar is not None:
            self._progbar.progress(topic, pos, item=item, unit=unit,
                                   total=total)
        # The rest of this method only produces progress.debug output.
        if pos is None or not self.configbool('progress', 'debug'):
            return

        if unit:
            unit = ' ' + unit
        if item:
            item = ' ' + item

        if total:
            pct = 100.0 * pos / total
            self.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                     % (topic, item, pos, total, unit, pct))
        else:
            # Unknown total: no percentage can be computed.
            self.debug('%s:%s %d%s\n' % (topic, item, pos, unit))
1598 1598
    def makeprogress(self, topic, unit="", total=None):
        '''Return a scmutil.progress helper for incrementing progress.

        This indirection exists only so low-level modules won't need to
        import scmutil.
        '''
        return scmutil.progress(self, topic, unit, total)
1602
    def log(self, service, *msg, **opts):
        '''hook for logging facility extensions

        service should be a readily-identifiable subsystem, which will
        allow filtering.

        *msg should be a newline-terminated format string to log, and
        then any values to %-format into that format string.

        **opts currently has no defined meanings.

        The base implementation is deliberately a no-op; extensions
        override or wrap this method to record the messages.
        '''
1610 1614
1611 1615 def label(self, msg, label):
1612 1616 '''style msg based on supplied label
1613 1617
1614 1618 If some color mode is enabled, this will add the necessary control
1615 1619 characters to apply such color. In addition, 'debug' color mode adds
1616 1620 markup showing which label affects a piece of text.
1617 1621
1618 1622 ui.write(s, 'label') is equivalent to
1619 1623 ui.write(ui.label(s, 'label')).
1620 1624 '''
1621 1625 if self._colormode is not None:
1622 1626 return color.colorlabel(self, msg, label)
1623 1627 return msg
1624 1628
    def develwarn(self, msg, stacklevel=1, config=None):
        """issue a developer warning message

        Use 'stacklevel' to report the offender some layers further up in the
        stack.

        Emitted only when devel.all-warnings is set, or when 'config' names
        a devel.* option that is set.
        """
        if not self.configbool('devel', 'all-warnings'):
            if config is None or not self.configbool('devel', config):
                return
        msg = 'devel-warn: ' + msg
        stacklevel += 1 # get in develwarn
        if self.tracebackflag:
            # With tracebacks enabled, dump a full stack trace.
            util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
            self.log('develwarn', '%s at:\n%s' %
                     (msg, ''.join(util.getstackframes(stacklevel))))
        else:
            # Otherwise report just the offending frame's file/line/function.
            curframe = inspect.currentframe()
            calframe = inspect.getouterframes(curframe, 2)
            fname, lineno, fmsg = calframe[stacklevel][1:4]
            fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
            self.write_err('%s at: %s:%d (%s)\n'
                           % (msg, fname, lineno, fmsg))
            self.log('develwarn', '%s at: %s:%d (%s)\n',
                     msg, fname, lineno, fmsg)
            curframe = calframe = None # avoid cycles
1650 1654
1651 1655 def deprecwarn(self, msg, version, stacklevel=2):
1652 1656 """issue a deprecation warning
1653 1657
1654 1658 - msg: message explaining what is deprecated and how to upgrade,
1655 1659 - version: last version where the API will be supported,
1656 1660 """
1657 1661 if not (self.configbool('devel', 'all-warnings')
1658 1662 or self.configbool('devel', 'deprec-warn')):
1659 1663 return
1660 1664 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
1661 1665 " update your code.)") % version
1662 1666 self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
1663 1667
    def exportableenviron(self):
        """The environment variables that are safe to export, e.g. through
        hgweb.

        Returns the internal ``_exportableenviron`` mapping, which is
        populated elsewhere in this class — confirm the construction site
        for what it contains.
        """
        return self._exportableenviron
1669 1673
    @contextlib.contextmanager
    def configoverride(self, overrides, source=""):
        """Context manager for temporary config overrides
        `overrides` must be a dict of the following structure:
        {(section, name) : value}"""
        backups = {}
        try:
            # Back up each value before overriding, so a failure partway
            # through still restores everything set so far.
            for (section, name), value in overrides.items():
                backups[(section, name)] = self.backupconfig(section, name)
                self.setconfig(section, name, value, source)
            yield
        finally:
            for __, backup in backups.items():
                self.restoreconfig(backup)
            # just restoring ui.quiet config to the previous value is not enough
            # as it does not update ui.quiet class member
            if ('ui', 'quiet') in overrides:
                self.fixconfig(section='ui')
1688 1692
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """
    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems('paths', ignoresub=True):
            # No location is the same as not existing.
            if not loc:
                continue
            # Re-read the location together with its sub-options
            # (e.g. pushurl) and build a full path object.
            loc, sub = ui.configsuboptions('paths', name)
            self[name] = path(ui, name, rawloc=loc, suboptions=sub)

    def getpath(self, name, default=None):
        """Return a ``path`` from a string, falling back to default.

        ``name`` can be a named path or locations. Locations are filesystem
        paths or URIs.

        Returns None if ``name`` is not a registered path, a URI, or a local
        path to a repo.

        ``default`` may be a single name or a sequence of names tried in
        order.
        """
        # Only fall back to default if no path was requested.
        if name is None:
            if not default:
                default = ()
            elif not isinstance(default, (tuple, list)):
                default = (default,)
            # Return the first default name that is registered.
            for k in default:
                try:
                    return self[k]
                except KeyError:
                    continue
            return None

        # Most likely empty string.
        # This may need to raise in the future.
        if not name:
            return None

        try:
            return self[name]
        except KeyError:
            # Try to resolve as a local path or URI.
            try:
                # We don't pass sub-options in, so no need to pass ui instance.
                return path(None, None, rawloc=name)
            except ValueError:
                raise error.RepoError(_('repository %s does not exist') %
                                      name)
1742 1746
# Registry of declared path sub-options: maps sub-option name to a tuple of
# (attribute name on ``path`` instances, parser function).
_pathsuboptions = {}

def pathsuboption(option, attr):
    """Decorator used to declare a path sub-option.

    ``option`` is the sub-option name and ``attr`` the attribute it should
    set on ``path`` instances.

    The decorated function receives a ``ui`` instance, a ``path`` instance,
    and the string value of this option from the config; whatever it returns
    is stored on the ``path`` instance.

    Use this decorator to perform additional verification of sub-options and
    to change the type of sub-options.
    """
    def _register(decorated):
        _pathsuboptions[option] = (attr, decorated)
        return decorated
    return _register
1763 1767
@pathsuboption('pushurl', 'pushloc')
def pushurlpathoption(ui, path, value):
    # Parser for the 'pushurl' sub-option: requires a real URL (with a
    # scheme) and strips any #fragment; returns None (and warns) otherwise.
    u = util.url(value)
    # Actually require a URL.
    if not u.scheme:
        ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
        return None

    # Don't support the #foo syntax in the push URL to declare branch to
    # push.
    if u.fragment:
        ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
                  'ignoring)\n') % path.name)
        u.fragment = None

    return bytes(u)
1780 1784
@pathsuboption('pushrev', 'pushrev')
def pushrevpathoption(ui, path, value):
    # Stored verbatim with no validation here; presumably a revset
    # evaluated by push machinery — confirm with callers.
    return value
1784 1788
class path(object):
    """Represents an individual path and its configuration."""

    def __init__(self, ui, name, rawloc=None, suboptions=None):
        """Construct a path from its config options.

        ``ui`` is the ``ui`` instance the path is coming from.
        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        ``pushloc`` is the raw locations pushes should be made to.

        If ``name`` is not defined, we require that the location be a) a local
        filesystem path with a .hg directory or b) a URL. If not,
        ``ValueError`` is raised.
        """
        if not rawloc:
            raise ValueError('rawloc must be defined')

        # Locations may define branches via syntax <base>#<branch>.
        u = util.url(rawloc)
        branch = None
        if u.fragment:
            branch = u.fragment
            u.fragment = None

        self.url = u
        self.branch = branch

        self.name = name
        self.rawloc = rawloc
        # Normalized (fragment-stripped) string form of the location.
        self.loc = '%s' % u

        # When given a raw location but not a symbolic name, validate the
        # location is valid.
        if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
            raise ValueError('location is not a URL or path to a local '
                             'repo: %s' % rawloc)

        suboptions = suboptions or {}

        # Now process the sub-options. If a sub-option is registered, its
        # attribute will always be present. The value will be None if there
        # was no valid sub-option.
        for suboption, (attr, func) in _pathsuboptions.iteritems():
            if suboption not in suboptions:
                setattr(self, attr, None)
                continue

            # The registered parser decides the stored value (and may warn).
            value = func(ui, self, suboptions[suboption])
            setattr(self, attr, value)

    def _isvalidlocalpath(self, path):
        """Returns True if the given path is a potentially valid repository.
        This is its own function so that extensions can change the definition of
        'valid' in this case (like when pulling from a git repo into a hg
        one)."""
        return os.path.isdir(os.path.join(path, '.hg'))

    @property
    def suboptions(self):
        """Return sub-options and their values for this path.

        This is intended to be used for presentation purposes.

        Only sub-options with a non-None value are included.
        """
        d = {}
        for subopt, (attr, _func) in _pathsuboptions.iteritems():
            value = getattr(self, attr)
            if value is not None:
                d[subopt] = value
        return d
1855 1859
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None

def getprogbar(ui):
    """Return the process-wide progress bar, creating it on first use."""
    global _progresssingleton
    if _progresssingleton is None:
        # passing 'ui' object to the singleton is fishy,
        # this is how the extension used to work but feel free to rework it.
        _progresssingleton = progress.progbar(ui)
    return _progresssingleton
1867 1871
def haveprogbar():
    # Cheap existence check, used by ui._progclear() to avoid triggering
    # the ui._progbar property cache before anything was loaded.
    return _progresssingleton is not None
General Comments 0
You need to be logged in to leave comments. Login now