##// END OF EJS Templates
merge: while checking for unknown files don't follow symlinks (issue5027)...
Siddharth Agarwal -
r27571:6a6e78f8 stable
parent child Browse files
Show More
@@ -1,1344 +1,1344
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from . import (
23 23 copies,
24 24 destutil,
25 25 error,
26 26 filemerge,
27 27 obsolete,
28 28 subrepo,
29 29 util,
30 30 worker,
31 31 )
32 32
33 33 _pack = struct.pack
34 34 _unpack = struct.unpack
35 35
36 36 def _droponode(data):
37 37 # used for compatibility for v1
38 38 bits = data.split('\0')
39 39 bits = bits[:-2] + bits[-1:]
40 40 return '\0'.join(bits)
41 41
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two file are used, one with an old
    format, one with a new format. Both contains similar data, but the new
    format can store new kinds of field.

    Current new format is a list of arbitrary record of the form:

        [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letter are mandatory record, Mercurial
    should abort if they are unknown. lower case record can be safely ignored.

    Currently known record:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more
    '''
    # relative (to .hg) paths of the two on-disk state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        """Create a mergestate for ``repo`` and load any state from disk."""
        self._repo = repo
        self._dirty = False
        self._read()

    def reset(self, node=None, other=None):
        """Discard the current state and start a fresh (empty) merge state.

        ``node``/``other``, when given, become the local and other nodes of
        the new merge.
        """
        self._state = {}
        self._local = None
        self._other = None
        # invalidate the @propertycache'd otherctx so it is recomputed
        if 'otherctx' in vars(self):
            del self.otherctx
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # remove saved pre-merge file copies; second arg ignores errors
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        # invalidate the @propertycache'd otherctx so it is recomputed
        if 'otherctx' in vars(self):
            del self.otherctx
        self._readmergedriver = None
        self._mdstate = 's'
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                # record is "<driver>\0<run state>"
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FD':
                # per-file entry: first field is the path, rest is state
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # unknown mandatory (upper-case) record: refuse to proceed
                raise error.Abort(_('unsupported merge state record: %s')
                                  % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if v1 and v2 records describe the same merge."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are file entries;
                # [:-1] strips the trailing newline
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            # walk the [type][length][content] framing
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def otherctx(self):
        # changectx of the "other" side of the merge; cached, and
        # explicitly invalidated by reset()/_read()
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            if self.mergedriver:
                records.append(('m', '\0'.join([
                    self.mergedriver, self._mdstate])))
            for d, v in self._state.iteritems():
                # driver-resolved files get a 'D' record, others 'F'
                if v[0] == 'd':
                    records.append(('D', '\0'.join([d] + v)))
                else:
                    records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        # the first record is always the local node ('L')
        lrecords = irecords.next()
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                # v1 has no "other file node" field
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            # [type:1 byte][length:4 bytes big-endian][content]
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.vfs.write('merge/' + hash, fcl.data())
        # state layout: [state, hash, local path, ancestor path, ancestor
        # node, other path, other node, local flags]
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # the resolve state of the file: 'u', 'r' or 'd'
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the resolve state of ``dfile`` and flag the state dirty."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        """Return the merge driver run state ('u', 'm' or 's')."""
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            # already resolved or driver-resolved: nothing to do
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                # local didn't change flags: take the other side's
                flags = flo
        if preresolve:
            # restore local
            f = self._repo.vfs('merge/' + hash)
            self._repo.wwrite(dfile, f.read(), flags)
            f.close()
            complete, r = filemerge.premerge(self._repo, self._local, lfile,
                                             fcd, fco, fca, labels=labels)
        else:
            complete, r = filemerge.filemerge(self._repo, self._local, lfile,
                                              fcd, fco, fca, labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return complete, r

    def preresolve(self, dfile, wctx, labels=None):
        """Run the premerge step for `dfile` (restores local, tries premerge)."""
        return self._resolve(True, dfile, wctx, labels=labels)

    def resolve(self, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        return self._resolve(False, dfile, wctx, labels=labels)[1]
403 403
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return True if working-directory file ``f`` is unknown to the dirstate
    and its content differs from ``f2`` in ``mctx`` (``f2`` defaults to ``f``).
    """
    if f2 is None:
        f2 = f
    # Use wvfs.isfileorlink rather than os.path.isfile so a symlink in the
    # working directory is checked itself instead of being followed to its
    # target (issue5027); the SOURCE text kept both the old os.path.isfile
    # line and this one -- the stale pre-fix line is dropped here.
    return (repo.wvfs.isfileorlink(f)
        and repo.wvfs.audit.check(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f]))
411 411
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = []
    if not force:
        # collect files whose creation would clobber a differing unknown file
        for f, (mtype, args, msg) in actions.iteritems():
            if mtype in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.append(f)
            elif mtype == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.append(f)

    for f in sorted(conflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if conflicts:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # rewrite create-style actions now that we know nothing conflicts
    for f, (mtype, args, msg) in actions.iteritems():
        if mtype == 'c':
            actions[f] = ('g', args, msg)
        elif mtype == 'cm':
            fl2, anc = args
            if _checkunknownfile(repo, wctx, mctx, f):
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
445 445
446 446 def _forgetremoved(wctx, mctx, branchmerge):
447 447 """
448 448 Forget removed files
449 449
450 450 If we're jumping between revisions (as opposed to merging), and if
451 451 neither the working directory nor the target rev has the file,
452 452 then we need to remove it from the dirstate, to prevent the
453 453 dirstate from listing the file when it is no longer in the
454 454 manifest.
455 455
456 456 If we're merging, and the other revision has removed a file
457 457 that is not present in the working directory, we need to mark it
458 458 as removed.
459 459 """
460 460
461 461 actions = {}
462 462 m = 'f'
463 463 if branchmerge:
464 464 m = 'r'
465 465 for f in wctx.deleted():
466 466 if f not in mctx:
467 467 actions[f] = m, None, "forget deleted"
468 468
469 469 if not branchmerge:
470 470 for f in wctx.removed():
471 471 if f not in mctx:
472 472 actions[f] = 'f', None, "forget removed"
473 473
474 474 return actions
475 475
def _checkcollision(repo, wmf, actions):
    """Abort if applying ``actions`` on top of manifest ``wmf`` would create
    case-folding collisions between files or between a file and a directory.
    """
    # provisional merged manifest: replay the effect of every action on the
    # set of file names present in the working manifest
    provisional = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for mtype in ('a', 'f', 'g', 'cd', 'dc'):
            for f, args, msg in actions[mtype]:
                provisional.add(f)
        for f, args, msg in actions['r']:
            provisional.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            provisional.discard(f2)
            provisional.add(f)
        for f, args, msg in actions['dg']:
            provisional.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(provisional):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
518 518
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # no core implementation: report success so the merge proceeds
    return True
524 524
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # no core implementation: report success so the merge proceeds
    return True
530 530
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete); actions maps filename to an
    (action-type, args, message) tuple.
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    # files that are the destination of a copy or a directory move; handled
    # from the other manifest's side of the diff
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                # '+' marks the substate entry as dirty so it always merges
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink involved on any of the three sides
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', None, "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (fl2,), "prompt deleted/changed")

    return actions, diverge, renamedelete
674 674
675 675 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
676 676 """Resolves false conflicts where the nodeid changed but the content
677 677 remained the same."""
678 678
679 679 for f, (m, args, msg) in actions.items():
680 680 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
681 681 # local did change but ended up with same content
682 682 actions[f] = 'r', None, "prompt same"
683 683 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
684 684 # remote did change but ended up with same content
685 685 del actions[f] # don't get = keep local deleted
686 686
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    "Calculate the actions needed to merge mctx into wctx using ancestors"

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # NOTE(review): comparison direction here is the opposite of the
            # diverge case above (this keeps the longer renamedelete list);
            # verify whether that is intentional
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # record each action as a bid for its file
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # updating a dirty working directory: also forget removed files
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
776 776
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    ui = repo.ui
    chatty = ui.verbose
    # hoist hot-loop lookups
    dounlink = util.unlinkpath
    topath = repo.wjoin
    checkpath = repo.wvfs.audit
    count = 0
    for f, args, msg in actions:
        ui.debug(" %s: %s -> r\n" % (f, msg))
        if chatty:
            ui.note(_("removing %s\n") % f)
        checkpath(f)
        try:
            dounlink(topath(f), ignoremissing=True)
        except OSError as inst:
            ui.warn(_("update failed to remove %s: %s!\n") %
                    (f, inst.strerror))
        # report progress in batches of 100 files
        if count == 100:
            yield count, f
            count = 0
        count += 1
    if count > 0:
        yield count, f
803 803
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    ui = repo.ui
    chatty = ui.verbose
    # hoist hot-loop lookups
    getfctx = mctx.filectx
    writewd = repo.wwrite
    count = 0
    for f, args, msg in actions:
        ui.debug(" %s: %s -> g\n" % (f, msg))
        if chatty:
            ui.note(_("getting %s\n") % f)
        writewd(f, getfctx(f).data(), args[0])
        # report progress in batches of 100 files
        if count == 100:
            yield count, f
            count = 0
        count += 1
    if count > 0:
        yield count, f
826 826
827 827 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
828 828 """apply the merge action list to the working directory
829 829
830 830 wctx is the working copy context
831 831 mctx is the context to be merged into the working copy
832 832
833 833 Return a tuple of counts (updated, merged, removed, unresolved) that
834 834 describes how many files were affected by the update.
835 835 """
836 836
837 837 updated, merged, removed, unresolved = 0, 0, 0, 0
838 838 ms = mergestate(repo)
839 839 ms.reset(wctx.p1().node(), mctx.node())
840 840 moves = []
841 841 for m, l in actions.items():
842 842 l.sort()
843 843
844 844 # prescan for merges
845 845 for f, args, msg in actions['m']:
846 846 f1, f2, fa, move, anc = args
847 847 if f == '.hgsubstate': # merged internally
848 848 continue
849 849 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
850 850 fcl = wctx[f1]
851 851 fco = mctx[f2]
852 852 actx = repo[anc]
853 853 if fa in actx:
854 854 fca = actx[fa]
855 855 else:
856 856 fca = repo.filectx(f1, fileid=nullrev)
857 857 ms.add(fcl, fco, fca, f)
858 858 if f1 != f and move:
859 859 moves.append(f1)
860 860
861 861 audit = repo.wvfs.audit
862 862 _updating = _('updating')
863 863 _files = _('files')
864 864 progress = repo.ui.progress
865 865
866 866 # remove renamed files after safely stored
867 867 for f in moves:
868 868 if os.path.lexists(repo.wjoin(f)):
869 869 repo.ui.debug("removing %s\n" % f)
870 870 audit(f)
871 871 util.unlinkpath(repo.wjoin(f))
872 872
873 873 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
874 874
875 875 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
876 876 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
877 877
878 878 # remove in parallel (must come first)
879 879 z = 0
880 880 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
881 881 for i, item in prog:
882 882 z += i
883 883 progress(_updating, z, item=item, total=numupdates, unit=_files)
884 884 removed = len(actions['r'])
885 885
886 886 # get in parallel
887 887 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
888 888 for i, item in prog:
889 889 z += i
890 890 progress(_updating, z, item=item, total=numupdates, unit=_files)
891 891 updated = len(actions['g'])
892 892
893 893 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
894 894 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
895 895
896 896 # forget (manifest only, just log it) (must come first)
897 897 for f, args, msg in actions['f']:
898 898 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
899 899 z += 1
900 900 progress(_updating, z, item=f, total=numupdates, unit=_files)
901 901
902 902 # re-add (manifest only, just log it)
903 903 for f, args, msg in actions['a']:
904 904 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
905 905 z += 1
906 906 progress(_updating, z, item=f, total=numupdates, unit=_files)
907 907
908 908 # keep (noop, just log it)
909 909 for f, args, msg in actions['k']:
910 910 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
911 911 # no progress
912 912
913 913 # directory rename, move local
914 914 for f, args, msg in actions['dm']:
915 915 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
916 916 z += 1
917 917 progress(_updating, z, item=f, total=numupdates, unit=_files)
918 918 f0, flags = args
919 919 repo.ui.note(_("moving %s to %s\n") % (f0, f))
920 920 audit(f)
921 921 repo.wwrite(f, wctx.filectx(f0).data(), flags)
922 922 util.unlinkpath(repo.wjoin(f0))
923 923 updated += 1
924 924
925 925 # local directory rename, get
926 926 for f, args, msg in actions['dg']:
927 927 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
928 928 z += 1
929 929 progress(_updating, z, item=f, total=numupdates, unit=_files)
930 930 f0, flags = args
931 931 repo.ui.note(_("getting %s to %s\n") % (f0, f))
932 932 repo.wwrite(f, mctx.filectx(f0).data(), flags)
933 933 updated += 1
934 934
935 935 # exec
936 936 for f, args, msg in actions['e']:
937 937 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
938 938 z += 1
939 939 progress(_updating, z, item=f, total=numupdates, unit=_files)
940 940 flags, = args
941 941 audit(f)
942 942 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
943 943 updated += 1
944 944
945 945 mergeactions = actions['m']
946 946 # the ordering is important here -- ms.mergedriver will raise if the merge
947 947 # driver has changed, and we want to be able to bypass it when overwrite is
948 948 # True
949 949 usemergedriver = not overwrite and mergeactions and ms.mergedriver
950 950
951 951 if usemergedriver:
952 952 ms.commit()
953 953 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
954 954 # the driver might leave some files unresolved
955 955 unresolvedf = set(ms.unresolved())
956 956 if not proceed:
957 957 # XXX setting unresolved to at least 1 is a hack to make sure we
958 958 # error out
959 959 return updated, merged, removed, max(len(unresolvedf), 1)
960 960 newactions = []
961 961 for f, args, msg in mergeactions:
962 962 if f in unresolvedf:
963 963 newactions.append((f, args, msg))
964 964 mergeactions = newactions
965 965
966 966 # premerge
967 967 tocomplete = []
968 968 for f, args, msg in actions['m']:
969 969 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
970 970 z += 1
971 971 progress(_updating, z, item=f, total=numupdates, unit=_files)
972 972 if f == '.hgsubstate': # subrepo states need updating
973 973 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
974 974 overwrite)
975 975 continue
976 976 audit(f)
977 977 complete, r = ms.preresolve(f, wctx, labels=labels)
978 978 if complete:
979 979 if r is not None and r > 0:
980 980 unresolved += 1
981 981 else:
982 982 if r is None:
983 983 updated += 1
984 984 else:
985 985 merged += 1
986 986 else:
987 987 numupdates += 1
988 988 tocomplete.append((f, args, msg))
989 989
990 990 # merge
991 991 for f, args, msg in tocomplete:
992 992 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
993 993 z += 1
994 994 progress(_updating, z, item=f, total=numupdates, unit=_files)
995 995 r = ms.resolve(f, wctx, labels=labels)
996 996 if r is not None and r > 0:
997 997 unresolved += 1
998 998 else:
999 999 if r is None:
1000 1000 updated += 1
1001 1001 else:
1002 1002 merged += 1
1003 1003
1004 1004 ms.commit()
1005 1005
1006 1006 if usemergedriver and not unresolved and ms.mdstate() != 's':
1007 1007 if not driverconclude(repo, ms, wctx, labels=labels):
1008 1008 # XXX setting unresolved to at least 1 is a hack to make sure we
1009 1009 # error out
1010 1010 return updated, merged, removed, max(unresolved, 1)
1011 1011
1012 1012 ms.commit()
1013 1013
1014 1014 progress(_updating, None, total=numupdates, unit=_files)
1015 1015
1016 1016 return updated, merged, removed, unresolved
1017 1017
def recordupdates(repo, actions, branchmerge):
    """record merge actions to the dirstate

    actions is a dict of lists keyed by single-letter action codes
    (r=remove, f=forget, a=re-add, e=exec change, k=keep, g=get, m=merge,
    dm/dg=directory rename move/get); each entry is an (f, args, msg)
    tuple.  branchmerge selects between branch-merge dirstate handling
    (two parents) and plain-update handling (one parent).

    The working copy files have already been updated by applyupdates();
    this only records the results.  Processing order matters: removes and
    forgets must be recorded before anything that may re-add a file.
    """
    # remove (must come first)
    for f, args, msg in actions['r']:
        if branchmerge:
            # keep the file marked as removed relative to the first parent
            repo.dirstate.remove(f)
        else:
            # plain update: the file simply stops being tracked
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions['f']:
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions['a']:
        if not branchmerge:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions['e']:
        # size/mtime may no longer match the dirstate entry; recheck lazily
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions['k']:
        pass

    # get
    for f, args, msg in actions['g']:
        if branchmerge:
            # content came from the second parent
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions['m']:
        # args layout mirrors what applyupdates consumed for 'm' actions
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions['dm']:
        f0, flag = args
        if branchmerge:
            # record as an add + copy from the pre-rename path f0
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions['dg']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
1095 1095
def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = optional labels for conflict markers, passed to applyupdates()

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c -C dirty rev | linear same cross
     n  n  n     n  |   ok   (1)   x
     n  n  n     y  |   ok   ok    ok
     n  n  y     n  |  merge (2)   (2)
     n  n  y     y  |  merge (3)   (3)
     n  y  *     *  |  discard discard discard
     y  n  y     *  |   (4)  (4)   (4)
     y  n  n     *  |   ok   ok    ok
     y  y  *     *  |   (5)  (5)   (5)

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
                 discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    # remember the caller-supplied node: None means "pick a destination"
    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        # pas = merge ancestors; [None] means "not computed yet"
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        if node is None:
            # no explicit destination: let destutil pick one (and warn
            # developers about the legacy calling convention)
            if (repo.ui.configbool('devel', 'all-warnings')
                or repo.ui.configbool('devel', 'oldapi')):
                repo.ui.develwarn('update with no target')
            rev, _mark, _act = destutil.destupdate(repo)
            node = repo[rev].node()

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            # compute the merge ancestor(s) unless the caller supplied one
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise error.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            # a dirty subrepo also blocks the merge
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise error.Abort(msg, hint=hint)
                    else:  # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise error.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = False
        if overwrite:
            # --clean: diff against the working copy itself so everything
            # from p2 wins
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
            followcopies = True

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
            followcopies)
        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.checkcase(repo.path):
            # case-insensitive filesystem: guard against case collisions
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # Prompt and create actions. TODO: Move this towards resolve phase.
        for f, args, msg in sorted(actions['cd']):
            # changed/deleted: ask whether to keep the local change
            if repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?"
                  "$$ &Changed $$ &Delete") % f, 0):
                actions['r'].append((f, None, "prompt delete"))
            else:
                actions['a'].append((f, None, "prompt keep"))
        del actions['cd'][:]

        for f, args, msg in sorted(actions['dc']):
            # deleted/changed: ask whether to recreate the file from remote
            flags, = args
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?"
                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
                actions['g'].append((f, (flags,), "prompt recreating"))
        del actions['dc'][:]

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:
        # fire the post-update hook outside the wlock
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1311 1311
def graft(repo, ctx, pctx, labels):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent and tries to duplicate any renames/copies
    appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']

    Returns the same tuple as update() / applyupdates().
    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    # positional args: branchmerge=True, force=True, partial=False,
    # ancestor=pctx (see update() signature above)
    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    # drop the second merge parent
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write(repo.currenttransaction())
    # fix up dirstate for copies and renames
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
@@ -1,1172 +1,1183
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import wdirrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # the seven categories are stored positionally; the properties
        # below expose them under readable names
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files whose content has changed'''
        return self[0]

    @property
    def added(self):
        '''files newly scheduled for tracking'''
        return self[1]

    @property
    def removed(self):
        '''files scheduled for removal'''
        return self[2]

    @property
    def deleted(self):
        '''files tracked in the dirstate but missing from the working copy
        (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''untracked files that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''untracked files matched by the ignore rules (_dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files with no modifications'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a subpath -> ctx mapping, preferring ctx1 when a subpath is
    # present in both. The subpaths from ctx2 are important when the
    # .hgsub file has been modified (in ctx2) but not yet committed
    # (in ctx1).
    subpaths = {}
    for subpath in ctx2.substate:
        subpaths[subpath] = ctx2
    for subpath in ctx1.substate:
        subpaths[subpath] = ctx1

    # subpaths only present in ctx2 are handled separately below
    missing = set()
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath in sorted(subpaths):
        yield subpath, subpaths[subpath].sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # collect excluded nodes that are secret (and not extinct); their
    # count is mentioned in the message so the user knows why nothing
    # was exchanged
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleanup.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
121 121
def checknewlabel(repo, lbl, kind):
    """Validate 'lbl' as a new label name, raising error.Abort if unusable.

    Rejected: reserved names, names containing forbidden characters, and
    purely numeric names (they would be ambiguous with revision numbers).
    """
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    forbidden = (':', '\0', '\n', '\r')
    for c in forbidden:
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline characters would corrupt the manifest/dirstate formats
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config

    Raises error.Abort for fundamentally invalid names (via checkfilename)
    or, when configured to abort, for Windows-unsafe names.
    '''
    # hard requirement first: no newline characters
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # checkwinfilename returns a message string, or None if f is safe
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans derived from the
    ui.portablefilenames config value (default: 'warn').
    '''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    # parsebool returns None when the value is not a recognized boolean
    bval = util.parsebool(val)
    # aborting is mandatory on Windows, where such names cannot exist
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    '''Warn or abort when a newly added filename collides case-insensitively
    with a file already tracked in the dirstate.

    Call the instance with each candidate filename; depending on the
    'abort' flag it either raises error.Abort or emits a ui warning on a
    collision.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # fold every tracked filename once up front; joining with NUL and
        # splitting again lowers all names in a single encoding.lower call
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a collision on the folded name is only a problem if f itself is
        # not already tracked (an exact match is not a collision)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # remember this name so later calls can collide against it too
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.

    Returns None when the view filters nothing, or when no filtered
    revision falls at or below maxrev.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    # only revisions visible below maxrev matter; sort for a stable hash
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = util.sha1()
        for rev in revs:
            # feed the decimal rev numbers, ';'-separated, into the hash
            s.update('%s;' % rev)
        key = s.digest()
    return key
213 213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Provides path-based convenience wrappers around os/util operations.
    Subclasses must implement __call__ (open a file relative to the vfs
    root) and join (map a vfs-relative path to a real path).
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only swallow "file not found"; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cache __call__ as this instance's 'open' so subsequent calls
        # dispatch straight to __call__ without going through this shim
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # read the whole file in binary mode and always close the handle
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        # overwrite the file in binary mode
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            # lstat (not stat) so a dangling or directory-pointing symlink
            # is still reported as a link rather than followed (issue5027)
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        # report the name relative to 'dir' when one was given, otherwise
        # just the bare file name
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        # note: the 'stat' parameter shadows the stat module in this scope
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            # retry failed removes after adding the write bit; anything
            # other than an os.remove failure still propagates
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield a (dirpath, dirs, files) tuple for each directory under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even if you specify a POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
425 436
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # optionally canonicalize the base directory up front so every
        # later join is stable
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        # None means "not probed yet"; __call__ lazily determines whether
        # nlink counts from this filesystem can be trusted
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept any path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem under base supports symlinks (cached)
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem under base honors the exec bit (cached)
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks before modifying the file in place
                        # (copy-on-write for hardlinked stores)
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target in a regular file
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
545 556
546 557 opener = vfs
547 558
class auditvfs(object):
    """Base class for vfs wrappers that forward audit control inward.

    Keeps a reference to the wrapped vfs in ``self.vfs`` and exposes a
    ``mustaudit`` property that reads and writes the inner vfs's flag.
    """

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        # delegate straight to the wrapped vfs
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        # propagate the new flag value to the wrapped vfs
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
559 570
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rewrites every filename through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the name, then delegate the actual open to the inner vfs
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            # empty/None path means the vfs base: pass through unfiltered
            return self.vfs.join(path)
        combined = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(combined))
575 586
576 587 filteropener = filtervfs
577 588
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rejects any attempt to open a file for writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain reads are permitted; anything else is a caller bug
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort('this vfs is read only')

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
591 602
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only a failure on the root path itself is fatal; errors deeper
        # in the walk are silently skipped by os.walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True when it was not
            # seen before (samestat-based dedup breaks symlink cycles)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so refuse to follow
        # symlinks at all
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link with the shared seen_dirs
                        # list so a cycle eventually terminates
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
639 650
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultdir = os.path.join(util.datapath, 'default.d')
    rcs = []
    if os.path.isdir(defaultdir):
        # pick up every bundled *.rc file shipped in default.d
        rcs.extend(os.path.join(defaultdir, name)
                   for name, kind in osutil.listdir(defaultdir)
                   if name.endswith('.rc'))
    rcs.extend(systemrcpath())
    rcs.extend(userrcpath())
    return [os.path.normpath(p) for p in rcs]
652 663
653 664 _rcpath = None
654 665
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        # computed once per process and memoized in the module-level
        # _rcpath; later calls return the cached list
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
678 689
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    if rev is not None:
        return rev
    # the working directory has no real revision number; substitute its
    # integer sentinel so comparisons still work
    return wdirrev
685 696
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx (the last of the set)."""
    # an empty spec -- but not the integer 0 -- falls back to the default
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
694 705
def _pairspec(revspec):
    """Return True when ``revspec`` parses to a top-level range expression."""
    # optimize fixes up "x^:y" -> "(x^):y" before we inspect the root node
    tree = revset.optimize(revset.parse(revspec), True)[1]
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
699 710
def revpair(repo, revs):
    """Resolve ``revs`` to a pair (first, second) of nodes.

    ``second`` is None when the specs name a single revision that is not a
    range expression.  Raises error.Abort when resolution is empty.
    """
    if not revs:
        # no spec at all: working directory's first parent, no second
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered set: fall back to positional first/last
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
729 740
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""
    # wrap raw integers so the revset parser accepts them verbatim
    specs = [revset.formatspec('rev(%d)', spec) if isinstance(spec, int)
             else spec
             for spec in revs]
    matcher = revset.matchany(repo.ui, specs, repo)
    return matcher(repo)
739 750
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # in debug mode always show the pair, padded with the null rev
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        # linear history: the parent is implied by revision order
        return []
    return parents
755 766
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern (no explicit "kind:" prefix): try glob expansion
            try:
                globbed = glob.glob(pat)
            except re.error:
                # invalid glob pattern: keep the literal name
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # explicit kind, or the glob matched nothing: keep pattern verbatim
        ret.append(kindpat)
    return ret
774 785
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs on platforms whose shell does not (windows)
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-match handler: warn on the repo ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher makes the original patterns irrelevant
        pats = []
    return m, pats
799 810
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats, discarding the expanded pattern list
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
804 815
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
808 819
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
812 823
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and remove missing ones, recording likely renames.

    Returns 1 when any explicitly named file was rejected or a subrepo
    addremove failed, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is covered when named exactly or when any pattern
        # file lies inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # report only files the user named explicitly; collect everything
        # so the final loop can compute the return code
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
876 887
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # the matcher's badfn collects rejected names for the final return code
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            # at least one requested file could not be processed
            return 1
    return 0
905 916
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked file at a path that passes the audit
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed yet still present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
934 945
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        shouldreport = (repo.ui.verbose or not matcher.exact(old)
                        or not matcher.exact(new))
        if shouldreport:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
949 960
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        # forget missing files first, then register the new ones, then
        # record copy metadata for every detected rename
        wctx.forget(deleted)
        wctx.add(unknown)
        for dest, source in renames.iteritems():
            wctx.copy(source, dest)
    finally:
        wlock.release()
962 973
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain: record against the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                # the source is only added, never committed: there is no
                # revision to attach copy metadata to
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
981 992
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings; raises RequirementError when
    the file is corrupt or lists unsupported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                # entries must begin with an alphanumeric character; an
                # empty or malformed line means the file itself is broken,
                # not merely that a feature is unsupported
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1000 1011
def writerequires(opener, requirements):
    """Write ``requirements``, sorted and one per line, to the ``requires``
    file obtained from ``opener``.

    The file is closed even when a write fails (the original leaked the
    handle on error).
    """
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        # always release the handle, even on a failed write
        reqfile.close()
1006 1017
class filecachesubentry(object):
    # Tracks stat information for a single on-disk path; filecache uses a
    # group of these to decide when a cached object must be rebuilt.
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so subsequent changed() calls compare against the
        # current on-disk state
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None when it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1061 1072
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # short-circuits on the first changed sub-entry, like the
        # original explicit loop
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1078 1089
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # stale cache: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1154 1165
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run ``cmd`` with ``envvar`` set to a token that lets the child
    inherit ``lock``.

    Raises LockInheritanceContractViolation when ``lock`` is not held.
    Returns the exit code reported by ui.system.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        # the inherited-lock token is passed to the child via environment
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1164 1175
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
@@ -1,296 +1,318
1 1 $ cat <<EOF > merge
2 2 > import sys, os
3 3 >
4 4 > try:
5 5 > import msvcrt
6 6 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
7 7 > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
8 8 > except ImportError:
9 9 > pass
10 10 >
11 11 > print "merging for", os.path.basename(sys.argv[1])
12 12 > EOF
13 13 $ HGMERGE="python ../merge"; export HGMERGE
14 14
15 15 $ hg init t
16 16 $ cd t
17 17 $ echo This is file a1 > a
18 18 $ hg add a
19 19 $ hg commit -m "commit #0"
20 20 $ echo This is file b1 > b
21 21 $ hg add b
22 22 $ hg commit -m "commit #1"
23 23
24 24 $ hg update 0
25 25 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
26 26
27 27 Test interrupted updates by exploiting our non-handling of directory collisions
28 28
29 29 $ mkdir b
30 30 $ hg up
31 31 abort: *: '$TESTTMP/t/b' (glob)
32 32 [255]
33 33 $ hg ci
34 34 abort: last update was interrupted
35 35 (use 'hg update' to get a consistent checkout)
36 36 [255]
37 37 $ hg sum
38 38 parent: 0:538afb845929
39 39 commit #0
40 40 branch: default
41 41 commit: (interrupted update)
42 42 update: 1 new changesets (update)
43 43 phases: 2 draft
44 44 $ rmdir b
45 45 $ hg up
46 46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 47 $ hg sum
48 48 parent: 1:b8bb4a988f25 tip
49 49 commit #1
50 50 branch: default
51 51 commit: (clean)
52 52 update: (current)
53 53 phases: 2 draft
54 54
55 55 Prepare a basic merge
56 56
57 57 $ hg up 0
58 58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 59 $ echo This is file c1 > c
60 60 $ hg add c
61 61 $ hg commit -m "commit #2"
62 62 created new head
63 63 $ echo This is file b1 > b
64 64 no merges expected
65 65 $ hg merge -P 1
66 66 changeset: 1:b8bb4a988f25
67 67 user: test
68 68 date: Thu Jan 01 00:00:00 1970 +0000
69 69 summary: commit #1
70 70
71 71 $ hg merge 1
72 72 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 73 (branch merge, don't forget to commit)
74 74 $ hg diff --nodates
75 75 diff -r 49035e18a8e6 b
76 76 --- /dev/null
77 77 +++ b/b
78 78 @@ -0,0 +1,1 @@
79 79 +This is file b1
80 80 $ hg status
81 81 M b
82 82 $ cd ..; rm -r t
83 83
84 84 $ hg init t
85 85 $ cd t
86 86 $ echo This is file a1 > a
87 87 $ hg add a
88 88 $ hg commit -m "commit #0"
89 89 $ echo This is file b1 > b
90 90 $ hg add b
91 91 $ hg commit -m "commit #1"
92 92
93 93 $ hg update 0
94 94 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
95 95 $ echo This is file c1 > c
96 96 $ hg add c
97 97 $ hg commit -m "commit #2"
98 98 created new head
99 99 $ echo This is file b2 > b
100 100 merge should fail
101 101 $ hg merge 1
102 102 b: untracked file differs
103 103 abort: untracked files in working directory differ from files in requested revision
104 104 [255]
105
106 #if symlink
107 symlinks to directories should be treated as regular files (issue5027)
108 $ rm b
109 $ ln -s 'This is file b2' b
110 $ hg merge 1
111 b: untracked file differs
112 abort: untracked files in working directory differ from files in requested revision
113 [255]
114 symlinks shouldn't be followed
115 $ rm b
116 $ echo This is file b1 > .hg/b
117 $ ln -s .hg/b b
118 $ hg merge 1
119 b: untracked file differs
120 abort: untracked files in working directory differ from files in requested revision
121 [255]
122
123 $ rm b
124 $ echo This is file b2 > b
125 #endif
126
105 127 merge of b expected
106 128 $ hg merge -f 1
107 129 merging b
108 130 merging for b
109 131 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
110 132 (branch merge, don't forget to commit)
111 133 $ hg diff --nodates
112 134 diff -r 49035e18a8e6 b
113 135 --- /dev/null
114 136 +++ b/b
115 137 @@ -0,0 +1,1 @@
116 138 +This is file b2
117 139 $ hg status
118 140 M b
119 141 $ cd ..; rm -r t
120 142
121 143 $ hg init t
122 144 $ cd t
123 145 $ echo This is file a1 > a
124 146 $ hg add a
125 147 $ hg commit -m "commit #0"
126 148 $ echo This is file b1 > b
127 149 $ hg add b
128 150 $ hg commit -m "commit #1"
129 151 $ echo This is file b22 > b
130 152 $ hg commit -m "commit #2"
131 153 $ hg update 1
132 154 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 155 $ echo This is file c1 > c
134 156 $ hg add c
135 157 $ hg commit -m "commit #3"
136 158 created new head
137 159
138 160 Contents of b should be "this is file b1"
139 161 $ cat b
140 162 This is file b1
141 163
142 164 $ echo This is file b22 > b
143 165 merge fails
144 166 $ hg merge 2
145 167 abort: uncommitted changes
146 168 (use 'hg status' to list changes)
147 169 [255]
148 170 merge expected!
149 171 $ hg merge -f 2
150 172 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 173 (branch merge, don't forget to commit)
152 174 $ hg diff --nodates
153 175 diff -r 85de557015a8 b
154 176 --- a/b
155 177 +++ b/b
156 178 @@ -1,1 +1,1 @@
157 179 -This is file b1
158 180 +This is file b22
159 181 $ hg status
160 182 M b
161 183 $ cd ..; rm -r t
162 184
163 185 $ hg init t
164 186 $ cd t
165 187 $ echo This is file a1 > a
166 188 $ hg add a
167 189 $ hg commit -m "commit #0"
168 190 $ echo This is file b1 > b
169 191 $ hg add b
170 192 $ hg commit -m "commit #1"
171 193 $ echo This is file b22 > b
172 194 $ hg commit -m "commit #2"
173 195 $ hg update 1
174 196 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 197 $ echo This is file c1 > c
176 198 $ hg add c
177 199 $ hg commit -m "commit #3"
178 200 created new head
179 201 $ echo This is file b33 > b
180 202 merge of b should fail
181 203 $ hg merge 2
182 204 abort: uncommitted changes
183 205 (use 'hg status' to list changes)
184 206 [255]
185 207 merge of b expected
186 208 $ hg merge -f 2
187 209 merging b
188 210 merging for b
189 211 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
190 212 (branch merge, don't forget to commit)
191 213 $ hg diff --nodates
192 214 diff -r 85de557015a8 b
193 215 --- a/b
194 216 +++ b/b
195 217 @@ -1,1 +1,1 @@
196 218 -This is file b1
197 219 +This is file b33
198 220 $ hg status
199 221 M b
200 222
201 223 Test for issue2364
202 224
203 225 $ hg up -qC .
204 226 $ hg rm b
205 227 $ hg ci -md
206 228 $ hg revert -r -2 b
207 229 $ hg up -q -- -2
208 230
209 231 Test that updated files are treated as "modified", when
210 232 'merge.update()' is aborted before 'merge.recordupdates()' (= parents
211 233 aren't changed), even if none of their mode, size, and timestamp
212 234 is changed on the filesystem (see also issue4583).
213 235
214 236 $ cat > $TESTTMP/abort.py <<EOF
215 237 > # emulate aborting before "recordupdates()". in this case, files
216 238 > # are changed without updating dirstate
217 239 > from mercurial import extensions, merge, error
218 240 > def applyupdates(orig, *args, **kwargs):
219 241 > orig(*args, **kwargs)
220 242 > raise error.Abort('intentional aborting')
221 243 > def extsetup(ui):
222 244 > extensions.wrapfunction(merge, "applyupdates", applyupdates)
223 245 > EOF
224 246
225 247 $ cat >> .hg/hgrc <<EOF
226 248 > [fakedirstatewritetime]
227 249 > # emulate invoking dirstate.write() via repo.status()
228 250 > # at 2000-01-01 00:00
229 251 > fakenow = 200001010000
230 252 > EOF
231 253
232 254 (file gotten from other revision)
233 255
234 256 $ hg update -q -C 2
235 257 $ echo 'THIS IS FILE B5' > b
236 258 $ hg commit -m 'commit #5'
237 259
238 260 $ hg update -q -C 3
239 261 $ cat b
240 262 This is file b1
241 263 $ touch -t 200001010000 b
242 264 $ hg debugrebuildstate
243 265
244 266 $ cat >> .hg/hgrc <<EOF
245 267 > [extensions]
246 268 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
247 269 > abort = $TESTTMP/abort.py
248 270 > EOF
249 271 $ hg merge 5
250 272 abort: intentional aborting
251 273 [255]
252 274 $ cat >> .hg/hgrc <<EOF
253 275 > [extensions]
254 276 > fakedirstatewritetime = !
255 277 > abort = !
256 278 > EOF
257 279
258 280 $ cat b
259 281 THIS IS FILE B5
260 282 $ touch -t 200001010000 b
261 283 $ hg status -A b
262 284 M b
263 285
264 286 (file merged from other revision)
265 287
266 288 $ hg update -q -C 3
267 289 $ echo 'this is file b6' > b
268 290 $ hg commit -m 'commit #6'
269 291 created new head
270 292
271 293 $ cat b
272 294 this is file b6
273 295 $ touch -t 200001010000 b
274 296 $ hg debugrebuildstate
275 297
276 298 $ cat >> .hg/hgrc <<EOF
277 299 > [extensions]
278 300 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
279 301 > abort = $TESTTMP/abort.py
280 302 > EOF
281 303 $ hg merge --tool internal:other 5
282 304 abort: intentional aborting
283 305 [255]
284 306 $ cat >> .hg/hgrc <<EOF
285 307 > [extensions]
286 308 > fakedirstatewritetime = !
287 309 > abort = !
288 310 > EOF
289 311
290 312 $ cat b
291 313 THIS IS FILE B5
292 314 $ touch -t 200001010000 b
293 315 $ hg status -A b
294 316 M b
295 317
296 318 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now