##// END OF EJS Templates
merge: add merge action 'p' to record path conflicts during update...
Mark Thomas -
r34548:81aebcc7 default
parent child Browse files
Show More
@@ -1,1796 +1,1812 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullhex,
22 22 nullid,
23 23 nullrev,
24 24 )
25 25 from . import (
26 26 copies,
27 27 error,
28 28 filemerge,
29 29 match as matchmod,
30 30 obsutil,
31 31 pycompat,
32 32 scmutil,
33 33 subrepo,
34 34 util,
35 35 worker,
36 36 )
37 37
38 38 _pack = struct.pack
39 39 _unpack = struct.unpack
40 40
41 41 def _droponode(data):
42 42 # used for compatibility for v1
43 43 bits = data.split('\0')
44 44 bits = bits[:-2] + bits[-1:]
45 45 return '\0'.join(bits)
46 46
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    # paths of the two state files, relative to the repo's .hg directory
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        """Discard any in-memory and on-disk merge state.

        node/other optionally seed the "local"/"other" changeset nodes of a
        new merge; labels are the user-visible side names (e.g. working copy /
        merge rev).
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop the cached property values so they are recomputed lazily
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # remove any stashed pre-merge file copies (ignore-errors=True)
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop the cached property values so they are recomputed lazily
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                # record is '<driver>\0<run state>'
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDCP':
                # per-file records: '<filename>\0<state fields...>'
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                # extras record: filename followed by alternating key/value
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory; collect and abort below
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if every v1 record is consistent with v2records.

        Used to decide whether the richer v2 file was written by the same
        Mercurial run as the v1 file (see `_readrecords`)."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
            (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node; the rest are file entries;
                # l[:-1] strips the trailing newline
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # NOTE: py2 bytes indexing yields a 1-char str here
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == 't':
                    # 't' wraps the real record: first byte is the real type
                    rtype, record = record[0], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #  gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        """Changectx of the "local" side of the merge (cached)."""
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        """Changectx of the "other" side of the merge (cached)."""
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        """Serialize the in-memory state into a list of (TYPE, data) records.

        See the class docstring for the meaning of each record type."""
        records = []
        records.append(('L', hex(self._local)))
        records.append(('O', hex(self._other)))
        if self.mergedriver:
            records.append(('m', '\0'.join([
                self.mergedriver, self._mdstate])))
        for d, v in self._state.iteritems():
            if v[0] == 'd':
                records.append(('D', '\0'.join([d] + v)))
            elif v[0] in ('pu', 'pr'):
                records.append(('P', '\0'.join([d] + v)))
            # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
            # older versions of Mercurial
            elif v[1] == nullhex or v[6] == nullhex:
                records.append(('C', '\0'.join([d] + v)))
            else:
                records.append(('F', '\0'.join([d] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append(('f', '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append(('l', labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = next(irecords)
        # _makerecords always emits the 'L' record first
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        whitelist = 'LOF'
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            if key not in whitelist:
                # wrap unknown-to-old-clients records in an advisory 't'
                key, data = 't', '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd:  file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # nullhex marks "no local version stashed"
            hash = nullhex
        else:
            hash = hex(hashlib.sha1(fcl.path()).digest())
            self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
        self._dirty = True

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path:    the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = ['pu', frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        """Return True if `dfile` has an entry in the merge state."""
        return dfile in self._state

    def __getitem__(self, dfile):
        """Return the record state ('u', 'r', 'pu', 'pr' or 'd') of `dfile`."""
        return self._state[dfile][0]

    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._state))

    def files(self):
        """Return the list of filenames tracked by this merge state."""
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the record state of `dfile` (e.g. 'r' once resolved)."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        """Return the merge driver run state ('u', 'm' or 's')."""
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in ('u', 'pu'):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def extras(self, filename):
        """Return (creating if needed) the extras dict for `filename`."""
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        Returns a (complete, exitcode) pair; when `preresolve` is True only
        the premerge step is run."""
        # already resolved or driver-resolved: nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')

        if complete:
            # record the dirstate action implied by the merge result
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        """Return ctx[f], or an absentfilectx when hexnode is the null hash."""
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == 'r':
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'r'

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'a'

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, 'g'
625 625
626 626 def _getcheckunknownconfig(repo, section, name):
627 627 config = repo.ui.config(section, name)
628 628 valid = ['abort', 'ignore', 'warn']
629 629 if config not in valid:
630 630 validstr = ', '.join(["'" + v + "'" for v in valid])
631 631 raise error.ConfigError(_("%s.%s not valid "
632 632 "('%s' is none of %s)")
633 633 % (section, name, config, validstr))
634 634 return config
635 635
636 636 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
637 637 if f2 is None:
638 638 f2 = f
639 639 return (repo.wvfs.audit.check(f)
640 640 and repo.wvfs.isfileorlink(f)
641 641 and repo.dirstate.normalize(f) not in repo.dirstate
642 642 and mctx[f2].cmp(wctx[f]))
643 643
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates `actions` in place ('c'/'cm' entries may become 'g' or 'm') and
    aborts when untracked files conflict and merge.checkunknown or
    merge.checkignored is set to 'abort'.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            # route each conflict set to the abort or warn bucket per config
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                # directory rename: compare against the rename source
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                else:
                    # NOTE: a former "elif config == 'abort'" branch here was
                    # unreachable -- the branch above already matches when
                    # config == 'abort' (see row (1) in the table) -- and has
                    # been removed. Only 'warn' and 'ignore' reach this point.
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # convert remaining 'c' (create) actions into 'g' (get), backing up
    # any file we detected as conflicting above
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
723 723
724 724 def _forgetremoved(wctx, mctx, branchmerge):
725 725 """
726 726 Forget removed files
727 727
728 728 If we're jumping between revisions (as opposed to merging), and if
729 729 neither the working directory nor the target rev has the file,
730 730 then we need to remove it from the dirstate, to prevent the
731 731 dirstate from listing the file when it is no longer in the
732 732 manifest.
733 733
734 734 If we're merging, and the other revision has removed a file
735 735 that is not present in the working directory, we need to mark it
736 736 as removed.
737 737 """
738 738
739 739 actions = {}
740 740 m = 'f'
741 741 if branchmerge:
742 742 m = 'r'
743 743 for f in wctx.deleted():
744 744 if f not in mctx:
745 745 actions[f] = m, None, "forget deleted"
746 746
747 747 if not branchmerge:
748 748 for f in wctx.removed():
749 749 if f not in mctx:
750 750 actions[f] = 'f', None, "forget removed"
751 751
752 752 return actions
753 753
def _checkcollision(repo, wmf, actions):
    """Abort if the merge result would collide on a case-insensitive
    filesystem.

    Builds a provisional merged manifest from the working manifest `wmf`
    plus `actions`, then raises Abort on any two entries whose names fold
    to the same string, or on a file colliding with a directory prefix.
    """
    # provisional merged manifest: start from the working manifest
    provisional = set(wmf)

    if actions:
        # 'k', 'dr', 'e' and 'rd' actions leave the manifest unchanged
        for key in ('a', 'am', 'f', 'g', 'cd', 'dc'):
            for f, args, msg in actions[key]:
                provisional.add(f)
        for f, args, msg in actions['r']:
            provisional.discard(f)
        for f, args, msg in actions['dm']:
            srcf, flags = args
            provisional.discard(srcf)
            provisional.add(f)
        for f, args, msg in actions['dg']:
            provisional.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(f)

    # two distinct files folding to the same name is a collision
    foldmap = {}
    for f in provisional:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # a file whose folded name is a prefix (directory) of another entry,
    # with different actual casing, is also a collision
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
796 796
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the merge driver's preprocess step, if one exists.

    Currently a no-op extension point: always reports success."""
    return True
802 802
def driverconclude(repo, ms, wctx, labels=None):
    """Run the merge driver's conclude step, if one exists.

    Currently a no-op extension point: always reports success."""
    return True
808 808
809 809 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
810 810 acceptremote, followcopies, forcefulldiff=False):
811 811 """
812 812 Merge wctx and p2 with ancestor pa and generate merge action list
813 813
814 814 branchmerge and force are as passed in to update
815 815 matcher = matcher to filter file lists
816 816 acceptremote = accept the incoming changes without prompting
817 817 """
818 818 if matcher is not None and matcher.always():
819 819 matcher = None
820 820
821 821 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
822 822
823 823 # manifests fetched in order are going to be faster, so prime the caches
824 824 [x.manifest() for x in
825 825 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
826 826
827 827 if followcopies:
828 828 ret = copies.mergecopies(repo, wctx, p2, pa)
829 829 copy, movewithdir, diverge, renamedelete, dirmove = ret
830 830
831 831 boolbm = pycompat.bytestr(bool(branchmerge))
832 832 boolf = pycompat.bytestr(bool(force))
833 833 boolm = pycompat.bytestr(bool(matcher))
834 834 repo.ui.note(_("resolving manifests\n"))
835 835 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
836 836 % (boolbm, boolf, boolm))
837 837 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
838 838
839 839 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
840 840 copied = set(copy.values())
841 841 copied.update(movewithdir.values())
842 842
843 843 if '.hgsubstate' in m1:
844 844 # check whether sub state is modified
845 845 if any(wctx.sub(s).dirty() for s in wctx.substate):
846 846 m1['.hgsubstate'] = modifiednodeid
847 847
848 848 # Don't use m2-vs-ma optimization if:
849 849 # - ma is the same as m1 or m2, which we're just going to diff again later
850 850 # - The caller specifically asks for a full diff, which is useful during bid
851 851 # merge.
852 852 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
853 853 # Identify which files are relevant to the merge, so we can limit the
854 854 # total m1-vs-m2 diff to just those files. This has significant
855 855 # performance benefits in large repositories.
856 856 relevantfiles = set(ma.diff(m2).keys())
857 857
858 858 # For copied and moved files, we need to add the source file too.
859 859 for copykey, copyvalue in copy.iteritems():
860 860 if copyvalue in relevantfiles:
861 861 relevantfiles.add(copykey)
862 862 for movedirkey in movewithdir:
863 863 relevantfiles.add(movedirkey)
864 864 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
865 865 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
866 866
867 867 diff = m1.diff(m2, match=matcher)
868 868
869 869 if matcher is None:
870 870 matcher = matchmod.always('', '')
871 871
872 872 actions = {}
873 873 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
874 874 if n1 and n2: # file exists on both local and remote side
875 875 if f not in ma:
876 876 fa = copy.get(f, None)
877 877 if fa is not None:
878 878 actions[f] = ('m', (f, f, fa, False, pa.node()),
879 879 "both renamed from " + fa)
880 880 else:
881 881 actions[f] = ('m', (f, f, None, False, pa.node()),
882 882 "both created")
883 883 else:
884 884 a = ma[f]
885 885 fla = ma.flags(f)
886 886 nol = 'l' not in fl1 + fl2 + fla
887 887 if n2 == a and fl2 == fla:
888 888 actions[f] = ('k', (), "remote unchanged")
889 889 elif n1 == a and fl1 == fla: # local unchanged - use remote
890 890 if n1 == n2: # optimization: keep local content
891 891 actions[f] = ('e', (fl2,), "update permissions")
892 892 else:
893 893 actions[f] = ('g', (fl2, False), "remote is newer")
894 894 elif nol and n2 == a: # remote only changed 'x'
895 895 actions[f] = ('e', (fl2,), "update permissions")
896 896 elif nol and n1 == a: # local only changed 'x'
897 897 actions[f] = ('g', (fl1, False), "remote is newer")
898 898 else: # both changed something
899 899 actions[f] = ('m', (f, f, f, False, pa.node()),
900 900 "versions differ")
901 901 elif n1: # file exists only on local side
902 902 if f in copied:
903 903 pass # we'll deal with it on m2 side
904 904 elif f in movewithdir: # directory rename, move local
905 905 f2 = movewithdir[f]
906 906 if f2 in m2:
907 907 actions[f2] = ('m', (f, f2, None, True, pa.node()),
908 908 "remote directory rename, both created")
909 909 else:
910 910 actions[f2] = ('dm', (f, fl1),
911 911 "remote directory rename - move from " + f)
912 912 elif f in copy:
913 913 f2 = copy[f]
914 914 actions[f] = ('m', (f, f2, f2, False, pa.node()),
915 915 "local copied/moved from " + f2)
916 916 elif f in ma: # clean, a different, no remote
917 917 if n1 != ma[f]:
918 918 if acceptremote:
919 919 actions[f] = ('r', None, "remote delete")
920 920 else:
921 921 actions[f] = ('cd', (f, None, f, False, pa.node()),
922 922 "prompt changed/deleted")
923 923 elif n1 == addednodeid:
924 924 # This extra 'a' is added by working copy manifest to mark
925 925 # the file as locally added. We should forget it instead of
926 926 # deleting it.
927 927 actions[f] = ('f', None, "remote deleted")
928 928 else:
929 929 actions[f] = ('r', None, "other deleted")
930 930 elif n2: # file exists only on remote side
931 931 if f in copied:
932 932 pass # we'll deal with it on m1 side
933 933 elif f in movewithdir:
934 934 f2 = movewithdir[f]
935 935 if f2 in m1:
936 936 actions[f2] = ('m', (f2, f, None, False, pa.node()),
937 937 "local directory rename, both created")
938 938 else:
939 939 actions[f2] = ('dg', (f, fl2),
940 940 "local directory rename - get from " + f)
941 941 elif f in copy:
942 942 f2 = copy[f]
943 943 if f2 in m2:
944 944 actions[f] = ('m', (f2, f, f2, False, pa.node()),
945 945 "remote copied from " + f2)
946 946 else:
947 947 actions[f] = ('m', (f2, f, f2, True, pa.node()),
948 948 "remote moved from " + f2)
949 949 elif f not in ma:
950 950 # local unknown, remote created: the logic is described by the
951 951 # following table:
952 952 #
953 953 # force branchmerge different | action
954 954 # n * * | create
955 955 # y n * | create
956 956 # y y n | create
957 957 # y y y | merge
958 958 #
959 959 # Checking whether the files are different is expensive, so we
960 960 # don't do that when we can avoid it.
961 961 if not force:
962 962 actions[f] = ('c', (fl2,), "remote created")
963 963 elif not branchmerge:
964 964 actions[f] = ('c', (fl2,), "remote created")
965 965 else:
966 966 actions[f] = ('cm', (fl2, pa.node()),
967 967 "remote created, get or merge")
968 968 elif n2 != ma[f]:
969 969 df = None
970 970 for d in dirmove:
971 971 if f.startswith(d):
972 972 # new file added in a directory that was moved
973 973 df = dirmove[d] + f[len(d):]
974 974 break
975 975 if df is not None and df in m1:
976 976 actions[df] = ('m', (df, f, f, False, pa.node()),
977 977 "local directory rename - respect move from " + f)
978 978 elif acceptremote:
979 979 actions[f] = ('c', (fl2,), "remote recreating")
980 980 else:
981 981 actions[f] = ('dc', (None, f, f, False, pa.node()),
982 982 "prompt deleted/changed")
983 983
984 984 return actions, diverge, renamedelete
985 985
986 986 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
987 987 """Resolves false conflicts where the nodeid changed but the content
988 988 remained the same."""
989 989
990 990 for f, (m, args, msg) in actions.items():
991 991 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
992 992 # local did change but ended up with same content
993 993 actions[f] = 'r', None, "prompt same"
994 994 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
995 995 # remote did change but ended up with same content
996 996 del actions[f] # don't get = keep local deleted
997 997
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    Returns a tuple ``(actions, diverge, renamedelete)`` where ``actions``
    maps filenames to ``(actiontype, args, message)`` tuples, as produced
    by manifestmerge() and filtered through sparse rules.
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        # Single ancestor: one manifestmerge pass produces the final actions.
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        # "Bid merge": compute actions against each candidate ancestor
        # separately (the "bids"), then auction the per-file bids below.
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            # forcefulldiff=True so every ancestor bids over the same file
            # set; manifestmerge's m2-vs-ma shortcut would otherwise hide
            # files from some bids.
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # Fold this ancestor's actions into fbids, grouped per file by
            # action type.
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    # Drop false conflicts where only the nodeid changed (content is equal).
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # Target is the working directory: fold in the extra actions
        # computed by _forgetremoved().
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    # Let sparse checkouts filter out actions for files outside the profile.
    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete
1108 1108
def _getcwd():
    """Return the current working directory, or None if it no longer exists.

    The cwd can legitimately vanish (e.g. it was removed during an update);
    callers use the None return to detect that case.
    """
    try:
        cwd = pycompat.getcwd()
    except OSError as err:
        # Any error other than "the directory is gone" is unexpected.
        if err.errno != errno.ENOENT:
            raise
        return None
    return cwd
1116 1116
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    ui = repo.ui
    startingcwd = _getcwd()
    count = 0
    for f, args, msg in actions:
        ui.debug(" %s: %s -> r\n" % (f, msg))
        if ui.verbose:
            ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as err:
            ui.warn(_("update failed to remove %s: %s!\n") %
                    (f, err.strerror))
        # Batch progress reports: yield roughly every hundred files.
        if count == 100:
            yield count, f
            count = 0
        count += 1
    if count > 0:
        yield count, f

    if startingcwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        ui.warn(_("current directory was removed\n"
                  "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1151 1151
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # backgroundclosing defers closing of the written files;
    # expectedcount hints how many writes are coming.
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # Preserve the existing file as an .orig backup before it
                # is overwritten below.
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    # A vanished source is fine; anything else propagates.
                    if e.errno != errno.ENOENT:
                        raise
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            # Batch progress reports: yield roughly every hundred files.
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1190 1190
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    ``actions`` is a mapping of action type to lists of
    ``(filename, args, message)`` tuples.  'p' entries record path
    conflicts in the merge state for manual resolution; the ordering of
    the per-type passes below is significant (see the inline comments).

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        # f1 is None for changed/deleted conflicts with no local file;
        # f2 is None when there is no remote file.
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # z counts processed files across all the passes below; 'k' (keep)
    # actions are no-ops and excluded from the progress total.
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        # fo indicates which side was renamed out of the way: 'l' for the
        # local file, anything else for the remote one.
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        # Persist the conflict in the merge state so 'hg resolve' sees it.
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # remove in parallel (must come before getting)
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # Only the files the driver left unresolved still need merging.
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # Premerge didn't finish this file; it needs a full resolve
            # pass below (and one more progress step).
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1417 1432
def recordupdates(repo, actions, branchmerge):
    """record merge actions to the dirstate

    ``actions`` is the action-type -> [(file, args, msg), ...] mapping
    produced by calculateupdates()/applyupdates(); ``branchmerge``
    selects between merge-style and plain-update dirstate bookkeeping.
    """
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            # content came from the other parent
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    # local file renamed to f: record the copy source
                    repo.dirstate.copy(f1, f)
                else:
                    # remote file renamed to f
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
1501 1516
1502 1517 def update(repo, node, branchmerge, force, ancestor=None,
1503 1518 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1504 1519 updatecheck=None, wc=None):
1505 1520 """
1506 1521 Perform a merge between the working directory and the given node
1507 1522
1508 1523 node = the node to update to
1509 1524 branchmerge = whether to merge between branches
1510 1525 force = whether to force branch merging or file overwriting
1511 1526 matcher = a matcher to filter file lists (dirstate not updated)
1512 1527 mergeancestor = whether it is merging with an ancestor. If true,
1513 1528 we should accept the incoming changes for any prompts that occur.
1514 1529 If false, merging with an ancestor (fast-forward) is only allowed
1515 1530 between different named branches. This flag is used by rebase extension
1516 1531 as a temporary fix and should be avoided in general.
1517 1532 labels = labels to use for base, local and other
1518 1533 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1519 1534 this is True, then 'force' should be True as well.
1520 1535
1521 1536 The table below shows all the behaviors of the update command
1522 1537 given the -c and -C or no options, whether the working directory
1523 1538 is dirty, whether a revision is specified, and the relationship of
1524 1539 the parent rev to the target rev (linear or not). Match from top first. The
1525 1540 -n option doesn't exist on the command line, but represents the
1526 1541 experimental.updatecheck=noconflict option.
1527 1542
1528 1543 This logic is tested by test-update-branches.t.
1529 1544
1530 1545 -c -C -n -m dirty rev linear | result
1531 1546 y y * * * * * | (1)
1532 1547 y * y * * * * | (1)
1533 1548 y * * y * * * | (1)
1534 1549 * y y * * * * | (1)
1535 1550 * y * y * * * | (1)
1536 1551 * * y y * * * | (1)
1537 1552 * * * * * n n | x
1538 1553 * * * * n * * | ok
1539 1554 n n n n y * y | merge
1540 1555 n n n n y y n | (2)
1541 1556 n n n y y * * | merge
1542 1557 n n y n y * * | merge if no conflict
1543 1558 n y n n y * * | discard
1544 1559 y n n n y * * | (3)
1545 1560
1546 1561 x = can't happen
1547 1562 * = don't-care
1548 1563 1 = incompatible options (checked in commands.py)
1549 1564 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1550 1565 3 = abort: uncommitted changes (checked in commands.py)
1551 1566
1552 1567 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1553 1568 to repo[None] if None is passed.
1554 1569
1555 1570 Return the same tuple as applyupdates().
1556 1571 """
1557 1572 # Avoid cycle.
1558 1573 from . import sparse
1559 1574
1560 1575 # This function used to find the default destination if node was None, but
1561 1576 # that's now in destutil.py.
1562 1577 assert node is not None
1563 1578 if not branchmerge and not force:
1564 1579 # TODO: remove the default once all callers that pass branchmerge=False
1565 1580 # and force=False pass a value for updatecheck. We may want to allow
1566 1581 # updatecheck='abort' to better suppport some of these callers.
1567 1582 if updatecheck is None:
1568 1583 updatecheck = 'linear'
1569 1584 assert updatecheck in ('none', 'linear', 'noconflict')
1570 1585 # If we're doing a partial update, we need to skip updating
1571 1586 # the dirstate, so make a note of any partial-ness to the
1572 1587 # update here.
1573 1588 if matcher is None or matcher.always():
1574 1589 partial = False
1575 1590 else:
1576 1591 partial = True
1577 1592 with repo.wlock():
1578 1593 if wc is None:
1579 1594 wc = repo[None]
1580 1595 pl = wc.parents()
1581 1596 p1 = pl[0]
1582 1597 pas = [None]
1583 1598 if ancestor is not None:
1584 1599 pas = [repo[ancestor]]
1585 1600
1586 1601 overwrite = force and not branchmerge
1587 1602
1588 1603 p2 = repo[node]
1589 1604 if pas[0] is None:
1590 1605 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1591 1606 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1592 1607 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1593 1608 else:
1594 1609 pas = [p1.ancestor(p2, warn=branchmerge)]
1595 1610
1596 1611 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1597 1612
1598 1613 ### check phase
1599 1614 if not overwrite:
1600 1615 if len(pl) > 1:
1601 1616 raise error.Abort(_("outstanding uncommitted merge"))
1602 1617 ms = mergestate.read(repo)
1603 1618 if list(ms.unresolved()):
1604 1619 raise error.Abort(_("outstanding merge conflicts"))
1605 1620 if branchmerge:
1606 1621 if pas == [p2]:
1607 1622 raise error.Abort(_("merging with a working directory ancestor"
1608 1623 " has no effect"))
1609 1624 elif pas == [p1]:
1610 1625 if not mergeancestor and wc.branch() == p2.branch():
1611 1626 raise error.Abort(_("nothing to merge"),
1612 1627 hint=_("use 'hg update' "
1613 1628 "or check 'hg heads'"))
1614 1629 if not force and (wc.files() or wc.deleted()):
1615 1630 raise error.Abort(_("uncommitted changes"),
1616 1631 hint=_("use 'hg status' to list changes"))
1617 1632 for s in sorted(wc.substate):
1618 1633 wc.sub(s).bailifchanged()
1619 1634
1620 1635 elif not overwrite:
1621 1636 if p1 == p2: # no-op update
1622 1637 # call the hooks and exit early
1623 1638 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1624 1639 repo.hook('update', parent1=xp2, parent2='', error=0)
1625 1640 return 0, 0, 0, 0
1626 1641
1627 1642 if (updatecheck == 'linear' and
1628 1643 pas not in ([p1], [p2])): # nonlinear
1629 1644 dirty = wc.dirty(missing=True)
1630 1645 if dirty:
1631 1646 # Branching is a bit strange to ensure we do the minimal
1632 1647 # amount of call to obsutil.foreground.
1633 1648 foreground = obsutil.foreground(repo, [p1.node()])
1634 1649 # note: the <node> variable contains a random identifier
1635 1650 if repo[node].node() in foreground:
1636 1651 pass # allow updating to successors
1637 1652 else:
1638 1653 msg = _("uncommitted changes")
1639 1654 hint = _("commit or update --clean to discard changes")
1640 1655 raise error.UpdateAbort(msg, hint=hint)
1641 1656 else:
1642 1657 # Allow jumping branches if clean and specific rev given
1643 1658 pass
1644 1659
1645 1660 if overwrite:
1646 1661 pas = [wc]
1647 1662 elif not branchmerge:
1648 1663 pas = [p1]
1649 1664
1650 1665 # deprecated config: merge.followcopies
1651 1666 followcopies = repo.ui.configbool('merge', 'followcopies')
1652 1667 if overwrite:
1653 1668 followcopies = False
1654 1669 elif not pas[0]:
1655 1670 followcopies = False
1656 1671 if not branchmerge and not wc.dirty(missing=True):
1657 1672 followcopies = False
1658 1673
1659 1674 ### calculate phase
1660 1675 actionbyfile, diverge, renamedelete = calculateupdates(
1661 1676 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1662 1677 followcopies, matcher=matcher, mergeforce=mergeforce)
1663 1678
1664 1679 if updatecheck == 'noconflict':
1665 1680 for f, (m, args, msg) in actionbyfile.iteritems():
1666 1681 if m not in ('g', 'k', 'e', 'r'):
1667 1682 msg = _("conflicting changes")
1668 1683 hint = _("commit or update --clean to discard changes")
1669 1684 raise error.Abort(msg, hint=hint)
1670 1685
1671 1686 # Prompt and create actions. Most of this is in the resolve phase
1672 1687 # already, but we can't handle .hgsubstate in filemerge or
1673 1688 # subrepo.submerge yet so we have to keep prompting for it.
1674 1689 if '.hgsubstate' in actionbyfile:
1675 1690 f = '.hgsubstate'
1676 1691 m, args, msg = actionbyfile[f]
1677 1692 prompts = filemerge.partextras(labels)
1678 1693 prompts['f'] = f
1679 1694 if m == 'cd':
1680 1695 if repo.ui.promptchoice(
1681 1696 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1682 1697 "use (c)hanged version or (d)elete?"
1683 1698 "$$ &Changed $$ &Delete") % prompts, 0):
1684 1699 actionbyfile[f] = ('r', None, "prompt delete")
1685 1700 elif f in p1:
1686 1701 actionbyfile[f] = ('am', None, "prompt keep")
1687 1702 else:
1688 1703 actionbyfile[f] = ('a', None, "prompt keep")
1689 1704 elif m == 'dc':
1690 1705 f1, f2, fa, move, anc = args
1691 1706 flags = p2[f2].flags()
1692 1707 if repo.ui.promptchoice(
1693 1708 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1694 1709 "use (c)hanged version or leave (d)eleted?"
1695 1710 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1696 1711 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1697 1712 else:
1698 1713 del actionbyfile[f]
1699 1714
1700 1715 # Convert to dictionary-of-lists format
1701 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1716 actions = dict((m, [])
1717 for m in 'a am f g cd dc r dm dg m e k p'.split())
1702 1718 for f, (m, args, msg) in actionbyfile.iteritems():
1703 1719 if m not in actions:
1704 1720 actions[m] = []
1705 1721 actions[m].append((f, args, msg))
1706 1722
1707 1723 if not util.fscasesensitive(repo.path):
1708 1724 # check collision between files only in p2 for clean update
1709 1725 if (not branchmerge and
1710 1726 (force or not wc.dirty(missing=True, branch=False))):
1711 1727 _checkcollision(repo, p2.manifest(), None)
1712 1728 else:
1713 1729 _checkcollision(repo, wc.manifest(), actions)
1714 1730
1715 1731 # divergent renames
1716 1732 for f, fl in sorted(diverge.iteritems()):
1717 1733 repo.ui.warn(_("note: possible conflict - %s was renamed "
1718 1734 "multiple times to:\n") % f)
1719 1735 for nf in fl:
1720 1736 repo.ui.warn(" %s\n" % nf)
1721 1737
1722 1738 # rename and delete
1723 1739 for f, fl in sorted(renamedelete.iteritems()):
1724 1740 repo.ui.warn(_("note: possible conflict - %s was deleted "
1725 1741 "and renamed to:\n") % f)
1726 1742 for nf in fl:
1727 1743 repo.ui.warn(" %s\n" % nf)
1728 1744
1729 1745 ### apply phase
1730 1746 if not branchmerge: # just jump to the new rev
1731 1747 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1732 1748 if not partial:
1733 1749 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1734 1750 # note that we're in the middle of an update
1735 1751 repo.vfs.write('updatestate', p2.hex())
1736 1752
1737 1753 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1738 1754 wc.flushall()
1739 1755
1740 1756 if not partial:
1741 1757 with repo.dirstate.parentchange():
1742 1758 repo.setparents(fp1, fp2)
1743 1759 recordupdates(repo, actions, branchmerge)
1744 1760 # update completed, clear state
1745 1761 util.unlink(repo.vfs.join('updatestate'))
1746 1762
1747 1763 if not branchmerge:
1748 1764 repo.dirstate.setbranch(p2.branch())
1749 1765
1750 1766 # If we're updating to a location, clean up any stale temporary includes
1751 1767 # (ex: this happens during hg rebase --abort).
1752 1768 if not branchmerge:
1753 1769 sparse.prunetemporaryincludes(repo)
1754 1770
1755 1771 if not partial:
1756 1772 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1757 1773 return stats
1758 1774
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge
    # if the destination is the same as the parent of the ctx (so we can use
    # graft to copy commits), and 2) informs update that the incoming changes
    # are newer than the destination so it doesn't prompt about "remote
    # changed foo which local deleted".
    wdirnode = repo['.'].node()
    mergeancestor = repo.changelog.isancestor(wdirnode, ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    # Pick the second dirstate parent: nullid unless the caller asked to
    # keep it and pctx really is one of ctx's two parents.
    secondparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        secondparent = ctxparents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), secondparent)
        repo.dirstate.write(repo.currenttransaction())
    # fix up dirstate for copies and renames
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
@@ -1,704 +1,705 b''
1 1 # sparse.py - functionality for sparse checkouts
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12 import os
13 13
14 14 from .i18n import _
15 15 from .node import nullid
16 16 from . import (
17 17 error,
18 18 match as matchmod,
19 19 merge as mergemod,
20 20 pathutil,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25
26 26 # Whether sparse features are enabled. This variable is intended to be
27 27 # temporary to facilitate porting sparse to core. It should eventually be
28 28 # a per-repo option, possibly a repo requirement.
29 29 enabled = False
30 30
def parseconfig(ui, raw):
    """Parse sparse config file content.

    Returns a tuple of includes, excludes, and profiles (each a set).
    """
    includes = set()
    excludes = set()
    profiles = set()
    seensection = False
    target = None  # set currently being filled; None before any section

    for line in raw.split('\n'):
        line = line.strip()
        if not line or line.startswith('#'):
            # empty or comment line, skip
            continue
        if line.startswith('%include '):
            profile = line[9:].strip()
            if profile:
                profiles.add(profile)
        elif line == '[include]':
            if seensection and target != includes:
                # includes must all come before excludes
                # TODO pass filename into this API so we can report it.
                raise error.Abort(_('sparse config cannot have includes '
                                    'after excludes'))
            seensection = True
            target = includes
        elif line == '[exclude]':
            seensection = True
            target = excludes
        else:
            if target is None:
                raise error.Abort(_('sparse config entry outside of '
                                    'section: %s') % line,
                                  hint=_('add an [include] or [exclude] line '
                                        'to declare the entry type'))

            if line.strip().startswith('/'):
                ui.warn(_('warning: sparse profile cannot use'
                          ' paths starting with /, ignoring %s\n') % line)
                continue
            target.add(line)

    return includes, excludes, profiles
76 76
# Exists as separate function to facilitate monkeypatching.
def readprofile(repo, profile, changeid):
    """Resolve the raw content of a sparse profile file."""
    # TODO add some kind of cache here because this incurs a manifest
    # resolve and can be slow.
    fctx = repo.filectx(profile, changeid=changeid)
    return fctx.data()
83 83
def patternsforrev(repo, rev):
    """Obtain sparse checkout patterns for the given rev.

    Returns a tuple of iterables representing includes, excludes, and
    patterns.
    """
    # Feature isn't enabled. No-op.
    if not enabled:
        return set(), set(), set()

    raw = repo.vfs.tryread('sparse')
    if not raw:
        return set(), set(), set()

    if rev is None:
        raise error.Abort(_('cannot parse sparse patterns from working '
                            'directory'))

    includes, excludes, profiles = parseconfig(repo.ui, raw)
    ctx = repo[rev]

    if profiles:
        # Walk the profile graph, folding in rules from every reachable
        # profile exactly once.
        visited = set()
        pending = profiles
        while pending:
            profile = pending.pop()
            if profile in visited:
                continue

            visited.add(profile)

            try:
                raw = readprofile(repo, profile, rev)
            except error.ManifestLookupError:
                msg = (
                    "warning: sparse profile '%s' not found "
                    "in rev %s - ignoring it\n" % (profile, ctx))
                # experimental config: sparse.missingwarning
                if repo.ui.configbool('sparse', 'missingwarning'):
                    repo.ui.warn(msg)
                else:
                    repo.ui.debug(msg)
                continue

            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
            includes.update(pincludes)
            excludes.update(pexcludes)
            pending.update(subprofs)

        profiles = visited

    if includes:
        # always keep repo metadata visible
        includes.add('.hg*')

    return includes, excludes, profiles
139 139
def activeconfig(repo):
    """Determine the active sparse config rules.

    Rules are constructed by reading the current sparse config and bringing in
    referenced profiles from parents of the working directory.
    """
    parentrevs = [repo.changelog.rev(node)
                  for node in repo.dirstate.parents() if node != nullid]

    allincludes = set()
    allexcludes = set()
    allprofiles = set()

    # Union the rules of every working-directory parent.
    for rev in parentrevs:
        includes, excludes, profiles = patternsforrev(repo, rev)
        allincludes |= includes
        allexcludes |= excludes
        allprofiles |= profiles

    return allincludes, allexcludes, allprofiles
160 160
def configsignature(repo, includetemp=True):
    """Obtain the signature string for the current sparse configuration.

    This is used to construct a cache key for matchers.
    """
    cache = repo._sparsesignaturecache

    signature = cache.get('signature')
    tempsignature = cache.get('tempsignature') if includetemp else '0'

    # Recompute (and memoize) when either needed hash is missing.
    if signature is None or (includetemp and tempsignature is None):
        signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
        cache['signature'] = signature

        if includetemp:
            raw = repo.vfs.tryread('tempsparse')
            tempsignature = hashlib.sha1(raw).hexdigest()
            cache['tempsignature'] = tempsignature

    return '%s %s' % (signature, tempsignature)
185 185
def writeconfig(repo, includes, excludes, profiles):
    """Write the sparse config file given a sparse configuration."""
    with repo.vfs('sparse', 'wb') as fh:
        for profile in sorted(profiles):
            fh.write('%%include %s\n' % profile)

        if includes:
            fh.write('[include]\n')
            for entry in sorted(includes):
                fh.write(entry)
                fh.write('\n')

        if excludes:
            fh.write('[exclude]\n')
            for entry in sorted(excludes):
                fh.write(entry)
                fh.write('\n')

    # The on-disk config changed; drop any cached signatures.
    repo._sparsesignaturecache.clear()
205 205
def readtemporaryincludes(repo):
    """Return the set of temporarily included paths (empty if none)."""
    raw = repo.vfs.tryread('tempsparse')
    return set(raw.split('\n')) if raw else set()
212 212
def writetemporaryincludes(repo, includes):
    """Persist the temporary sparse includes and drop cached signatures."""
    content = '\n'.join(sorted(includes))
    repo.vfs.write('tempsparse', content)
    repo._sparsesignaturecache.clear()
216 216
def addtemporaryincludes(repo, additional):
    """Add paths to the set of temporary sparse includes and persist it."""
    includes = readtemporaryincludes(repo)
    includes.update(additional)
    writetemporaryincludes(repo, includes)
222 222
def prunetemporaryincludes(repo):
    """Remove temporarily included files that fall outside the sparse config.

    No-op when sparse is disabled, when there are no temporary includes, or
    when the working directory has pending changes.
    """
    if not enabled or not repo.vfs.exists('tempsparse'):
        return

    s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        # Still have pending changes. Don't bother trying to prune.
        return

    sparsematch = matcher(repo, includetemp=False)
    dirstate = repo.dirstate
    tempincludes = readtemporaryincludes(repo)
    dropped = [f for f in tempincludes
               if f in dirstate and not sparsematch(f)]
    message = _('dropping temporarily included sparse files')
    actions = [(f, None, message) for f in dropped]

    typeactions = collections.defaultdict(list)
    typeactions['r'] = actions
    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)

    # Fix dirstate
    for f in dropped:
        dirstate.drop(f)

    repo.vfs.unlink('tempsparse')
    repo._sparsesignaturecache.clear()
    msg = _('cleaned up %d temporarily added file(s) from the '
            'sparse checkout\n')
    repo.ui.status(msg % len(tempincludes))
256 256
def forceincludematcher(matcher, includes):
    """Returns a matcher that returns true for any of the forced includes
    before testing against the actual matcher."""
    kindpats = [('path', include, '') for include in includes]
    forced = matchmod.includematcher('', '', kindpats)
    return matchmod.unionmatcher([forced, matcher])
263 263
def matcher(repo, revs=None, includetemp=True):
    """Obtain a matcher for sparse working directories for the given revs.

    If multiple revisions are specified, the matcher is the union of all
    revs.

    ``includetemp`` indicates whether to use the temporary sparse profile.
    """
    # If sparse isn't enabled, sparse matcher matches everything.
    if not enabled:
        return matchmod.always(repo.root, '')

    if not revs or revs == [None]:
        revs = [repo.changelog.rev(node)
                for node in repo.dirstate.parents() if node != nullid]

    signature = configsignature(repo, includetemp=includetemp)

    key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs)))

    cached = repo._sparsematchercache.get(key)
    if cached:
        return cached

    matchers = []
    for rev in revs:
        try:
            includes, excludes, profiles = patternsforrev(repo, rev)

            if includes or excludes:
                # Explicitly include subdirectories of includes so
                # status will walk them down to the actual include.
                subdirs = set()
                for include in includes:
                    # TODO consider using posix path functions here so Windows
                    # \ directory separators don't come into play.
                    dirname = os.path.dirname(include)
                    # basename is used to avoid issues with absolute
                    # paths (which on Windows can include the drive).
                    while os.path.basename(dirname):
                        subdirs.add(dirname)
                        dirname = os.path.dirname(dirname)

                m = matchmod.match(repo.root, '', [],
                                   include=includes, exclude=excludes,
                                   default='relpath')
                if subdirs:
                    m = forceincludematcher(m, subdirs)
                matchers.append(m)
        except IOError:
            pass

    if not matchers:
        result = matchmod.always(repo.root, '')
    elif len(matchers) == 1:
        result = matchers[0]
    else:
        result = matchmod.unionmatcher(matchers)

    if includetemp:
        tempincludes = readtemporaryincludes(repo)
        result = forceincludematcher(result, tempincludes)

    repo._sparsematchercache[key] = result

    return result
330 330
def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
    """Filter updates to only lay out files that match the sparse rules."""
    if not enabled:
        return actions

    oldrevs = [pctx.rev() for pctx in wctx.parents()]
    oldsparsematch = matcher(repo, oldrevs)

    if oldsparsematch.always():
        return actions

    files = set()
    prunedactions = {}

    if branchmerge:
        # If we're merging, use the wctx filter, since we're merging into
        # the wctx.
        sparsematch = matcher(repo, [wctx.parents()[0].rev()])
    else:
        # If we're updating, use the target context's filter, since we're
        # moving to the target context.
        sparsematch = matcher(repo, [mctx.rev()])

    tempfiles = []
    for f, action in actions.iteritems():
        mtype, args, msg = action
        files.add(f)
        if sparsematch(f):
            prunedactions[f] = action
        elif mtype == 'm':
            # keep merge actions and pull the file in temporarily
            tempfiles.append(f)
            prunedactions[f] = action
        elif branchmerge:
            if mtype != 'k':
                tempfiles.append(f)
                prunedactions[f] = action
        elif mtype == 'f':
            prunedactions[f] = action
        elif f in wctx:
            prunedactions[f] = ('r', args, msg)

    if len(tempfiles) > 0:
        repo.ui.status(_('temporarily included %d file(s) in the sparse '
                         'checkout for merging\n') % len(tempfiles))
        addtemporaryincludes(repo, tempfiles)

        # Add the new files to the working copy so they can be merged, etc
        getactions = []
        message = 'temporarily adding to sparse checkout'
        wctxmanifest = repo[None].manifest()
        for f in tempfiles:
            if f in wctxmanifest:
                fctx = repo[None][f]
                getactions.append((f, (fctx.flags(), False), message))

        typeactions = collections.defaultdict(list)
        typeactions['g'] = getactions
        mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
                              False)

        dirstate = repo.dirstate
        for f, args, msg in getactions:
            dirstate.normal(f)

    profiles = activeconfig(repo)[2]
    changedprofiles = profiles & files
    # If an active profile changed during the update, refresh the checkout.
    # Don't do this during a branch merge, since all incoming changes should
    # have been handled by the temporary includes above.
    if changedprofiles and not branchmerge:
        mf = mctx.manifest()
        for f in mf:
            old = oldsparsematch(f)
            new = sparsematch(f)
            if not old and new:
                flags = mf.flags(f)
                prunedactions[f] = ('g', (flags, False), '')
            elif old and not new:
                prunedactions[f] = ('r', [], '')

    return prunedactions
412 412
def refreshwdir(repo, origstatus, origsparsematch, force=False):
    """Refreshes working directory by taking sparse config into account.

    The old status and sparse matcher is compared against the current sparse
    matcher.

    Will abort if a file with pending changes is being excluded or included
    unless ``force`` is True.
    """
    # Verify there are no pending changes
    pending = set()
    pending.update(origstatus.modified)
    pending.update(origstatus.added)
    pending.update(origstatus.removed)
    sparsematch = matcher(repo)
    abort = False

    for f in pending:
        if not sparsematch(f):
            repo.ui.warn(_("pending changes to '%s'\n") % f)
            abort = not force

    if abort:
        raise error.Abort(_('could not update sparseness due to pending '
                            'changes'))

    # Calculate actions
    dirstate = repo.dirstate
    ctx = repo['.']
    added = []
    lookup = []
    dropped = []
    mf = ctx.manifest()
    files = set(mf)

    actions = {}

    for f in files:
        old = origsparsematch(f)
        new = sparsematch(f)
        # Add files that are newly included, or that don't exist in
        # the dirstate yet.
        if (new and not old) or (old and new and not f in dirstate):
            fl = mf.flags(f)
            if repo.wvfs.exists(f):
                actions[f] = ('e', (fl,), '')
                lookup.append(f)
            else:
                actions[f] = ('g', (fl, False), '')
                added.append(f)
        # Drop files that are newly excluded, or that still exist in
        # the dirstate.
        elif (old and not new) or (not old and not new and f in dirstate):
            dropped.append(f)
            if f not in pending:
                actions[f] = ('r', [], '')

    # Verify there are no pending changes in newly included files
    abort = False
    for f in lookup:
        repo.ui.warn(_("pending changes to '%s'\n") % f)
        abort = not force
    if abort:
        raise error.Abort(_('cannot change sparseness due to pending '
                            'changes (delete the files or use '
                            '--force to bring them back dirty)'))

    # Check for files that were only in the dirstate.
    for f, state in dirstate.iteritems():
        if not f in files:
            old = origsparsematch(f)
            new = sparsematch(f)
            if old and not new:
                dropped.append(f)

    # Apply changes to disk
    typeactions = dict((m, [])
                       for m in 'a f g am cd dc r dm dg m e k p'.split())
    for f, (m, args, msg) in actions.iteritems():
        typeactions.setdefault(m, []).append((f, args, msg))

    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)

    # Fix dirstate
    for f in added:
        dirstate.normal(f)

    for f in dropped:
        dirstate.drop(f)

    for f in lookup:
        # File exists on disk, and we're bringing it back in an unknown state.
        dirstate.normallookup(f)

    return added, dropped, lookup
509 510
def aftercommit(repo, node):
    """Perform actions after a working directory commit."""
    # This function is called unconditionally, even if sparse isn't
    # enabled.
    ctx = repo[node]

    profiles = patternsforrev(repo, ctx.rev())[2]

    # profiles will only have data if sparse is enabled.
    if profiles & set(ctx.files()):
        # An active profile was touched by this commit: refresh the checkout.
        origstatus = repo.status()
        origsparsematch = matcher(repo)
        refreshwdir(repo, origstatus, origsparsematch, force=True)

    prunetemporaryincludes(repo)
525 526
def _updateconfigandrefreshwdir(repo, includes, excludes, profiles,
                                force=False, removing=False):
    """Update the sparse config and working directory state."""
    rawconfig = repo.vfs.tryread('sparse')
    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, rawconfig)

    oldstatus = repo.status()
    origmatch = matcher(repo)
    oldrequires = set(repo.requirements)

    # TODO remove this try..except once the matcher integrates better
    # with dirstate. We currently have to write the updated config
    # because that will invalidate the matcher cache and force a
    # re-read. We ideally want to update the cached matcher on the
    # repo instance then flush the new config to disk once wdir is
    # updated. But this requires massive rework to matcher() and its
    # consumers.

    if 'exp-sparse' in oldrequires and removing:
        repo.requirements.discard('exp-sparse')
        scmutil.writerequires(repo.vfs, repo.requirements)
    elif 'exp-sparse' not in oldrequires:
        repo.requirements.add('exp-sparse')
        scmutil.writerequires(repo.vfs, repo.requirements)

    try:
        writeconfig(repo, includes, excludes, profiles)
        return refreshwdir(repo, oldstatus, origmatch, force=force)
    except Exception:
        # Roll requirements and config back to their previous state
        # before propagating the failure.
        if repo.requirements != oldrequires:
            repo.requirements.clear()
            repo.requirements |= oldrequires
            scmutil.writerequires(repo.vfs, repo.requirements)
        writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
        raise
561 562
def clearrules(repo, force=False):
    """Clears include/exclude rules from the sparse config.

    The remaining sparse config only has profiles, if defined. The working
    directory is refreshed, as needed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread('sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw)

        # Nothing to clear.
        if not includes and not excludes:
            return

        _updateconfigandrefreshwdir(repo, set(), set(), profiles,
                                    force=force)
576 577
def importfromfiles(repo, opts, paths, force=False):
    """Import sparse config rules from files.

    The updated sparse config is written out and the working directory
    is refreshed, as needed.
    """
    with repo.wlock():
        # read current configuration
        raw = repo.vfs.tryread('sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw)
        aincludes, aexcludes, aprofiles = activeconfig(repo)

        # Import rules on top; only take in rules that are not yet
        # part of the active rules.
        changed = False
        for path in paths:
            with util.posixfile(util.expandpath(path)) as fh:
                raw = fh.read()

            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
            oldsize = len(includes) + len(excludes) + len(profiles)
            includes.update(iincludes - aincludes)
            excludes.update(iexcludes - aexcludes)
            profiles.update(iprofiles - aprofiles)
            if len(includes) + len(excludes) + len(profiles) > oldsize:
                changed = True

        profilecount = includecount = excludecount = 0
        fcounts = (0, 0, 0)

        if changed:
            profilecount = len(profiles - aprofiles)
            includecount = len(includes - aincludes)
            excludecount = len(excludes - aexcludes)

            fcounts = map(len, _updateconfigandrefreshwdir(
                repo, includes, excludes, profiles, force=force))

        printchanges(repo.ui, opts, profilecount, includecount, excludecount,
                     *fcounts)
617 618
def updateconfig(repo, pats, opts, include=False, exclude=False, reset=False,
                 delete=False, enableprofile=False, disableprofile=False,
                 force=False, usereporootpaths=False):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    def netchange(new, old):
        # net number of entries added (negative if more were removed)
        return len(new - old) - len(old - new)

    with repo.wlock():
        raw = repo.vfs.tryread('sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw)

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_('paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + ':' if kind else '') +\
                        pathutil.canonpath(root, cwd, pat)
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = netchange(newprofiles, oldprofiles)
        includecount = netchange(newinclude, oldinclude)
        excludecount = netchange(newexclude, oldexclude)

        fcounts = map(len, _updateconfigandrefreshwdir(
            repo, newinclude, newexclude, newprofiles, force=force,
            removing=reset))

        printchanges(repo.ui, opts, profilecount, includecount,
                     excludecount, *fcounts)
682 683
def printchanges(ui, opts, profilecount=0, includecount=0, excludecount=0,
                 added=0, dropped=0, conflicting=0):
    """Print output summarizing sparse config changes."""
    with ui.formatter('sparse', opts) as fm:
        fm.startitem()
        for field, label, value in [
            ('profiles_added', _('Profiles changed: %d\n'), profilecount),
            ('include_rules_added', _('Include rules changed: %d\n'),
             includecount),
            ('exclude_rules_added', _('Exclude rules changed: %d\n'),
             excludecount),
        ]:
            fm.condwrite(ui.verbose, field, label, value)

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            for field, label, value in [
                ('files_added', _('Files added: %d\n'), added),
                ('files_dropped', _('Files dropped: %d\n'), dropped),
                ('files_conflicting', _('Files conflicting: %d\n'),
                 conflicting),
            ]:
                fm.condwrite(ui.verbose, field, label, value)
General Comments 0
You need to be logged in to leave comments. Login now