context: name files relative to cwd in warning messages...
Matt Harbison
r33501:7008f681 default
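
The functional change in this diff is confined to the workingctx methods add(), forget(), undelete() and copy() near the bottom: warning messages now pass file names through ds.pathto(pathutil.join(prefix, f)) so they are reported relative to the current working directory instead of the repository root (hence the new pathutil import). A minimal sketch of the idea follows; the pathto() and warn_missing() helpers here are simplified, hypothetical stand-ins for illustration, not Mercurial's actual dirstate.pathto() implementation.

import os

def pathto(repo_root, cwd, f):
    # Simplified stand-in for dirstate.pathto(): express a repo-root-relative
    # path relative to the user's current working directory.
    return os.path.relpath(os.path.join(repo_root, f), cwd)

def warn_missing(warn, repo_root, cwd, prefix, f):
    # Before this change the message used os.path.join(prefix, f), which is
    # relative to the repository root; after it, the name is relative to cwd.
    warn("%s does not exist!\n" % pathto(repo_root, cwd, os.path.join(prefix, f)))

if __name__ == '__main__':
    # Running from /repo/subdir, a missing file is now reported as
    # "missing.txt" rather than "subdir/missing.txt".
    warn_missing(lambda m: print(m, end=''),
                 '/repo', '/repo/subdir', '', 'subdir/missing.txt')
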
@@ -1,2322 +1,2330 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 pathutil,
36 37 phases,
37 38 pycompat,
38 39 repoview,
39 40 revlog,
40 41 scmutil,
41 42 sparse,
42 43 subrepo,
43 44 util,
44 45 )
45 46
46 47 propertycache = util.propertycache
47 48
48 49 nonascii = re.compile(r'[^\x21-\x7f]').search
49 50
50 51 class basectx(object):
51 52 """A basectx object represents the common logic for its children:
52 53 changectx: read-only context that is already present in the repo,
53 54 workingctx: a context that represents the working directory and can
54 55 be committed,
55 56 memctx: a context that represents changes in-memory and can also
56 57 be committed."""
57 58 def __new__(cls, repo, changeid='', *args, **kwargs):
58 59 if isinstance(changeid, basectx):
59 60 return changeid
60 61
61 62 o = super(basectx, cls).__new__(cls)
62 63
63 64 o._repo = repo
64 65 o._rev = nullrev
65 66 o._node = nullid
66 67
67 68 return o
68 69
69 70 def __bytes__(self):
70 71 return short(self.node())
71 72
72 73 __str__ = encoding.strmethod(__bytes__)
73 74
74 75 def __int__(self):
75 76 return self.rev()
76 77
77 78 def __repr__(self):
78 79 return r"<%s %s>" % (type(self).__name__, str(self))
79 80
80 81 def __eq__(self, other):
81 82 try:
82 83 return type(self) == type(other) and self._rev == other._rev
83 84 except AttributeError:
84 85 return False
85 86
86 87 def __ne__(self, other):
87 88 return not (self == other)
88 89
89 90 def __contains__(self, key):
90 91 return key in self._manifest
91 92
92 93 def __getitem__(self, key):
93 94 return self.filectx(key)
94 95
95 96 def __iter__(self):
96 97 return iter(self._manifest)
97 98
98 99 def _buildstatusmanifest(self, status):
99 100 """Builds a manifest that includes the given status results, if this is
100 101 a working copy context. For non-working copy contexts, it just returns
101 102 the normal manifest."""
102 103 return self.manifest()
103 104
104 105 def _matchstatus(self, other, match):
105 106 """return match.always if match is none
106 107
107 108 This internal method provides a way for child objects to override the
108 109 match operator.
109 110 """
110 111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 112
112 113 def _buildstatus(self, other, s, match, listignored, listclean,
113 114 listunknown):
114 115 """build a status with respect to another context"""
115 116 # Load earliest manifest first for caching reasons. More specifically,
116 117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 119 # 1000 and cache it so that when you read 1001, we just need to apply a
119 120 # delta to what's in the cache. So that's one full reconstruction + one
120 121 # delta application.
121 122 mf2 = None
122 123 if self.rev() is not None and self.rev() < other.rev():
123 124 mf2 = self._buildstatusmanifest(s)
124 125 mf1 = other._buildstatusmanifest(s)
125 126 if mf2 is None:
126 127 mf2 = self._buildstatusmanifest(s)
127 128
128 129 modified, added = [], []
129 130 removed = []
130 131 clean = []
131 132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
132 133 deletedset = set(deleted)
133 134 d = mf1.diff(mf2, match=match, clean=listclean)
134 135 for fn, value in d.iteritems():
135 136 if fn in deletedset:
136 137 continue
137 138 if value is None:
138 139 clean.append(fn)
139 140 continue
140 141 (node1, flag1), (node2, flag2) = value
141 142 if node1 is None:
142 143 added.append(fn)
143 144 elif node2 is None:
144 145 removed.append(fn)
145 146 elif flag1 != flag2:
146 147 modified.append(fn)
147 148 elif node2 not in wdirnodes:
148 149 # When comparing files between two commits, we save time by
149 150 # not comparing the file contents when the nodeids differ.
150 151 # Note that this means we incorrectly report a reverted change
151 152 # to a file as a modification.
152 153 modified.append(fn)
153 154 elif self[fn].cmp(other[fn]):
154 155 modified.append(fn)
155 156 else:
156 157 clean.append(fn)
157 158
158 159 if removed:
159 160 # need to filter files if they are already reported as removed
160 161 unknown = [fn for fn in unknown if fn not in mf1 and
161 162 (not match or match(fn))]
162 163 ignored = [fn for fn in ignored if fn not in mf1 and
163 164 (not match or match(fn))]
164 165 # if they're deleted, don't report them as removed
165 166 removed = [fn for fn in removed if fn not in deletedset]
166 167
167 168 return scmutil.status(modified, added, removed, deleted, unknown,
168 169 ignored, clean)
169 170
170 171 @propertycache
171 172 def substate(self):
172 173 return subrepo.state(self, self._repo.ui)
173 174
174 175 def subrev(self, subpath):
175 176 return self.substate[subpath][1]
176 177
177 178 def rev(self):
178 179 return self._rev
179 180 def node(self):
180 181 return self._node
181 182 def hex(self):
182 183 return hex(self.node())
183 184 def manifest(self):
184 185 return self._manifest
185 186 def manifestctx(self):
186 187 return self._manifestctx
187 188 def repo(self):
188 189 return self._repo
189 190 def phasestr(self):
190 191 return phases.phasenames[self.phase()]
191 192 def mutable(self):
192 193 return self.phase() > phases.public
193 194
194 195 def getfileset(self, expr):
195 196 return fileset.getfileset(self, expr)
196 197
197 198 def obsolete(self):
198 199 """True if the changeset is obsolete"""
199 200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
200 201
201 202 def extinct(self):
202 203 """True if the changeset is extinct"""
203 204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
204 205
205 206 def unstable(self):
206 207 """True if the changeset is not obsolete but it's ancestor are"""
207 208 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
208 209
209 210 def bumped(self):
210 211 """True if the changeset try to be a successor of a public changeset
211 212
212 213 Only non-public and non-obsolete changesets may be bumped.
213 214 """
214 215 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
215 216
216 217 def divergent(self):
217 218 """Is a successors of a changeset with multiple possible successors set
218 219
219 220 Only non-public and non-obsolete changesets may be divergent.
220 221 """
221 222 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
222 223
223 224 def troubled(self):
224 225 """True if the changeset is either unstable, bumped or divergent"""
225 226 return self.unstable() or self.bumped() or self.divergent()
226 227
227 228 def troubles(self):
228 229 """return the list of troubles affecting this changesets.
229 230
230 231 Troubles are returned as strings. possible values are:
231 232 - unstable,
232 233 - bumped,
233 234 - divergent.
234 235 """
235 236 troubles = []
236 237 if self.unstable():
237 238 troubles.append('unstable')
238 239 if self.bumped():
239 240 troubles.append('bumped')
240 241 if self.divergent():
241 242 troubles.append('divergent')
242 243 return troubles
243 244
244 245 def parents(self):
245 246 """return contexts for each parent changeset"""
246 247 return self._parents
247 248
248 249 def p1(self):
249 250 return self._parents[0]
250 251
251 252 def p2(self):
252 253 parents = self._parents
253 254 if len(parents) == 2:
254 255 return parents[1]
255 256 return changectx(self._repo, nullrev)
256 257
257 258 def _fileinfo(self, path):
258 259 if r'_manifest' in self.__dict__:
259 260 try:
260 261 return self._manifest[path], self._manifest.flags(path)
261 262 except KeyError:
262 263 raise error.ManifestLookupError(self._node, path,
263 264 _('not found in manifest'))
264 265 if r'_manifestdelta' in self.__dict__ or path in self.files():
265 266 if path in self._manifestdelta:
266 267 return (self._manifestdelta[path],
267 268 self._manifestdelta.flags(path))
268 269 mfl = self._repo.manifestlog
269 270 try:
270 271 node, flag = mfl[self._changeset.manifest].find(path)
271 272 except KeyError:
272 273 raise error.ManifestLookupError(self._node, path,
273 274 _('not found in manifest'))
274 275
275 276 return node, flag
276 277
277 278 def filenode(self, path):
278 279 return self._fileinfo(path)[0]
279 280
280 281 def flags(self, path):
281 282 try:
282 283 return self._fileinfo(path)[1]
283 284 except error.LookupError:
284 285 return ''
285 286
286 287 def sub(self, path, allowcreate=True):
287 288 '''return a subrepo for the stored revision of path, never wdir()'''
288 289 return subrepo.subrepo(self, path, allowcreate=allowcreate)
289 290
290 291 def nullsub(self, path, pctx):
291 292 return subrepo.nullsubrepo(self, path, pctx)
292 293
293 294 def workingsub(self, path):
294 295 '''return a subrepo for the stored revision, or wdir if this is a wdir
295 296 context.
296 297 '''
297 298 return subrepo.subrepo(self, path, allowwdir=True)
298 299
299 300 def match(self, pats=None, include=None, exclude=None, default='glob',
300 301 listsubrepos=False, badfn=None):
301 302 r = self._repo
302 303 return matchmod.match(r.root, r.getcwd(), pats,
303 304 include, exclude, default,
304 305 auditor=r.nofsauditor, ctx=self,
305 306 listsubrepos=listsubrepos, badfn=badfn)
306 307
307 308 def diff(self, ctx2=None, match=None, **opts):
308 309 """Returns a diff generator for the given contexts and matcher"""
309 310 if ctx2 is None:
310 311 ctx2 = self.p1()
311 312 if ctx2 is not None:
312 313 ctx2 = self._repo[ctx2]
313 314 diffopts = patch.diffopts(self._repo.ui, opts)
314 315 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
315 316
316 317 def dirs(self):
317 318 return self._manifest.dirs()
318 319
319 320 def hasdir(self, dir):
320 321 return self._manifest.hasdir(dir)
321 322
322 323 def status(self, other=None, match=None, listignored=False,
323 324 listclean=False, listunknown=False, listsubrepos=False):
324 325 """return status of files between two nodes or node and working
325 326 directory.
326 327
327 328 If other is None, compare this node with working directory.
328 329
329 330 returns (modified, added, removed, deleted, unknown, ignored, clean)
330 331 """
331 332
332 333 ctx1 = self
333 334 ctx2 = self._repo[other]
334 335
335 336 # This next code block is, admittedly, fragile logic that tests for
336 337 # reversing the contexts and wouldn't need to exist if it weren't for
337 338 # the fast (and common) code path of comparing the working directory
338 339 # with its first parent.
339 340 #
340 341 # What we're aiming for here is the ability to call:
341 342 #
342 343 # workingctx.status(parentctx)
343 344 #
344 345 # If we always built the manifest for each context and compared those,
345 346 # then we'd be done. But the special case of the above call means we
346 347 # just copy the manifest of the parent.
347 348 reversed = False
348 349 if (not isinstance(ctx1, changectx)
349 350 and isinstance(ctx2, changectx)):
350 351 reversed = True
351 352 ctx1, ctx2 = ctx2, ctx1
352 353
353 354 match = ctx2._matchstatus(ctx1, match)
354 355 r = scmutil.status([], [], [], [], [], [], [])
355 356 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
356 357 listunknown)
357 358
358 359 if reversed:
359 360 # Reverse added and removed. Clear deleted, unknown and ignored as
360 361 # these make no sense to reverse.
361 362 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
362 363 r.clean)
363 364
364 365 if listsubrepos:
365 366 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
366 367 try:
367 368 rev2 = ctx2.subrev(subpath)
368 369 except KeyError:
369 370 # A subrepo that existed in node1 was deleted between
370 371 # node1 and node2 (inclusive). Thus, ctx2's substate
371 372 # won't contain that subpath. The best we can do is ignore it.
372 373 rev2 = None
373 374 submatch = matchmod.subdirmatcher(subpath, match)
374 375 s = sub.status(rev2, match=submatch, ignored=listignored,
375 376 clean=listclean, unknown=listunknown,
376 377 listsubrepos=True)
377 378 for rfiles, sfiles in zip(r, s):
378 379 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
379 380
380 381 for l in r:
381 382 l.sort()
382 383
383 384 return r
384 385
385 386 def _filterederror(repo, changeid):
386 387 """build an exception to be raised about a filtered changeid
387 388
388 389 This is extracted in a function to help extensions (eg: evolve) to
389 390 experiment with various message variants."""
390 391 if repo.filtername.startswith('visible'):
391 392 msg = _("hidden revision '%s'") % changeid
392 393 hint = _('use --hidden to access hidden revisions')
393 394 return error.FilteredRepoLookupError(msg, hint=hint)
394 395 msg = _("filtered revision '%s' (not in '%s' subset)")
395 396 msg %= (changeid, repo.filtername)
396 397 return error.FilteredRepoLookupError(msg)
397 398
398 399 class changectx(basectx):
399 400 """A changecontext object makes access to data related to a particular
400 401 changeset convenient. It represents a read-only context already present in
401 402 the repo."""
402 403 def __init__(self, repo, changeid=''):
403 404 """changeid is a revision number, node, or tag"""
404 405
405 406 # since basectx.__new__ already took care of copying the object, we
406 407 # don't need to do anything in __init__, so we just exit here
407 408 if isinstance(changeid, basectx):
408 409 return
409 410
410 411 if changeid == '':
411 412 changeid = '.'
412 413 self._repo = repo
413 414
414 415 try:
415 416 if isinstance(changeid, int):
416 417 self._node = repo.changelog.node(changeid)
417 418 self._rev = changeid
418 419 return
419 420 if not pycompat.ispy3 and isinstance(changeid, long):
420 421 changeid = str(changeid)
421 422 if changeid == 'null':
422 423 self._node = nullid
423 424 self._rev = nullrev
424 425 return
425 426 if changeid == 'tip':
426 427 self._node = repo.changelog.tip()
427 428 self._rev = repo.changelog.rev(self._node)
428 429 return
429 430 if changeid == '.' or changeid == repo.dirstate.p1():
430 431 # this is a hack to delay/avoid loading obsmarkers
431 432 # when we know that '.' won't be hidden
432 433 self._node = repo.dirstate.p1()
433 434 self._rev = repo.unfiltered().changelog.rev(self._node)
434 435 return
435 436 if len(changeid) == 20:
436 437 try:
437 438 self._node = changeid
438 439 self._rev = repo.changelog.rev(changeid)
439 440 return
440 441 except error.FilteredRepoLookupError:
441 442 raise
442 443 except LookupError:
443 444 pass
444 445
445 446 try:
446 447 r = int(changeid)
447 448 if '%d' % r != changeid:
448 449 raise ValueError
449 450 l = len(repo.changelog)
450 451 if r < 0:
451 452 r += l
452 453 if r < 0 or r >= l and r != wdirrev:
453 454 raise ValueError
454 455 self._rev = r
455 456 self._node = repo.changelog.node(r)
456 457 return
457 458 except error.FilteredIndexError:
458 459 raise
459 460 except (ValueError, OverflowError, IndexError):
460 461 pass
461 462
462 463 if len(changeid) == 40:
463 464 try:
464 465 self._node = bin(changeid)
465 466 self._rev = repo.changelog.rev(self._node)
466 467 return
467 468 except error.FilteredLookupError:
468 469 raise
469 470 except (TypeError, LookupError):
470 471 pass
471 472
472 473 # lookup bookmarks through the name interface
473 474 try:
474 475 self._node = repo.names.singlenode(repo, changeid)
475 476 self._rev = repo.changelog.rev(self._node)
476 477 return
477 478 except KeyError:
478 479 pass
479 480 except error.FilteredRepoLookupError:
480 481 raise
481 482 except error.RepoLookupError:
482 483 pass
483 484
484 485 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 486 if self._node is not None:
486 487 self._rev = repo.changelog.rev(self._node)
487 488 return
488 489
489 490 # lookup failed
490 491 # check if it might have come from damaged dirstate
491 492 #
492 493 # XXX we could avoid the unfiltered if we had a recognizable
493 494 # exception for filtered changeset access
494 495 if changeid in repo.unfiltered().dirstate.parents():
495 496 msg = _("working directory has unknown parent '%s'!")
496 497 raise error.Abort(msg % short(changeid))
497 498 try:
498 499 if len(changeid) == 20 and nonascii(changeid):
499 500 changeid = hex(changeid)
500 501 except TypeError:
501 502 pass
502 503 except (error.FilteredIndexError, error.FilteredLookupError,
503 504 error.FilteredRepoLookupError):
504 505 raise _filterederror(repo, changeid)
505 506 except IndexError:
506 507 pass
507 508 raise error.RepoLookupError(
508 509 _("unknown revision '%s'") % changeid)
509 510
510 511 def __hash__(self):
511 512 try:
512 513 return hash(self._rev)
513 514 except AttributeError:
514 515 return id(self)
515 516
516 517 def __nonzero__(self):
517 518 return self._rev != nullrev
518 519
519 520 __bool__ = __nonzero__
520 521
521 522 @propertycache
522 523 def _changeset(self):
523 524 return self._repo.changelog.changelogrevision(self.rev())
524 525
525 526 @propertycache
526 527 def _manifest(self):
527 528 return self._manifestctx.read()
528 529
529 530 @property
530 531 def _manifestctx(self):
531 532 return self._repo.manifestlog[self._changeset.manifest]
532 533
533 534 @propertycache
534 535 def _manifestdelta(self):
535 536 return self._manifestctx.readdelta()
536 537
537 538 @propertycache
538 539 def _parents(self):
539 540 repo = self._repo
540 541 p1, p2 = repo.changelog.parentrevs(self._rev)
541 542 if p2 == nullrev:
542 543 return [changectx(repo, p1)]
543 544 return [changectx(repo, p1), changectx(repo, p2)]
544 545
545 546 def changeset(self):
546 547 c = self._changeset
547 548 return (
548 549 c.manifest,
549 550 c.user,
550 551 c.date,
551 552 c.files,
552 553 c.description,
553 554 c.extra,
554 555 )
555 556 def manifestnode(self):
556 557 return self._changeset.manifest
557 558
558 559 def user(self):
559 560 return self._changeset.user
560 561 def date(self):
561 562 return self._changeset.date
562 563 def files(self):
563 564 return self._changeset.files
564 565 def description(self):
565 566 return self._changeset.description
566 567 def branch(self):
567 568 return encoding.tolocal(self._changeset.extra.get("branch"))
568 569 def closesbranch(self):
569 570 return 'close' in self._changeset.extra
570 571 def extra(self):
571 572 return self._changeset.extra
572 573 def tags(self):
573 574 return self._repo.nodetags(self._node)
574 575 def bookmarks(self):
575 576 return self._repo.nodebookmarks(self._node)
576 577 def phase(self):
577 578 return self._repo._phasecache.phase(self._repo, self._rev)
578 579 def hidden(self):
579 580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 581
581 582 def children(self):
582 583 """return contexts for each child changeset"""
583 584 c = self._repo.changelog.children(self._node)
584 585 return [changectx(self._repo, x) for x in c]
585 586
586 587 def ancestors(self):
587 588 for a in self._repo.changelog.ancestors([self._rev]):
588 589 yield changectx(self._repo, a)
589 590
590 591 def descendants(self):
591 592 for d in self._repo.changelog.descendants([self._rev]):
592 593 yield changectx(self._repo, d)
593 594
594 595 def filectx(self, path, fileid=None, filelog=None):
595 596 """get a file context from this changeset"""
596 597 if fileid is None:
597 598 fileid = self.filenode(path)
598 599 return filectx(self._repo, path, fileid=fileid,
599 600 changectx=self, filelog=filelog)
600 601
601 602 def ancestor(self, c2, warn=False):
602 603 """return the "best" ancestor context of self and c2
603 604
604 605 If there are multiple candidates, it will show a message and check
605 606 merge.preferancestor configuration before falling back to the
606 607 revlog ancestor."""
607 608 # deal with workingctxs
608 609 n2 = c2._node
609 610 if n2 is None:
610 611 n2 = c2._parents[0]._node
611 612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 613 if not cahs:
613 614 anc = nullid
614 615 elif len(cahs) == 1:
615 616 anc = cahs[0]
616 617 else:
617 618 # experimental config: merge.preferancestor
618 619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 620 try:
620 621 ctx = changectx(self._repo, r)
621 622 except error.RepoLookupError:
622 623 continue
623 624 anc = ctx.node()
624 625 if anc in cahs:
625 626 break
626 627 else:
627 628 anc = self._repo.changelog.ancestor(self._node, n2)
628 629 if warn:
629 630 self._repo.ui.status(
630 631 (_("note: using %s as ancestor of %s and %s\n") %
631 632 (short(anc), short(self._node), short(n2))) +
632 633 ''.join(_(" alternatively, use --config "
633 634 "merge.preferancestor=%s\n") %
634 635 short(n) for n in sorted(cahs) if n != anc))
635 636 return changectx(self._repo, anc)
636 637
637 638 def descendant(self, other):
638 639 """True if other is descendant of this changeset"""
639 640 return self._repo.changelog.descendant(self._rev, other._rev)
640 641
641 642 def walk(self, match):
642 643 '''Generates matching file names.'''
643 644
644 645 # Wrap match.bad method to have message with nodeid
645 646 def bad(fn, msg):
646 647 # The manifest doesn't know about subrepos, so don't complain about
647 648 # paths into valid subrepos.
648 649 if any(fn == s or fn.startswith(s + '/')
649 650 for s in self.substate):
650 651 return
651 652 match.bad(fn, _('no such file in rev %s') % self)
652 653
653 654 m = matchmod.badmatch(match, bad)
654 655 return self._manifest.walk(m)
655 656
656 657 def matches(self, match):
657 658 return self.walk(match)
658 659
659 660 class basefilectx(object):
660 661 """A filecontext object represents the common logic for its children:
661 662 filectx: read-only access to a filerevision that is already present
662 663 in the repo,
663 664 workingfilectx: a filecontext that represents files from the working
664 665 directory,
665 666 memfilectx: a filecontext that represents files in-memory,
666 667 overlayfilectx: duplicate another filecontext with some fields overridden.
667 668 """
668 669 @propertycache
669 670 def _filelog(self):
670 671 return self._repo.file(self._path)
671 672
672 673 @propertycache
673 674 def _changeid(self):
674 675 if r'_changeid' in self.__dict__:
675 676 return self._changeid
676 677 elif r'_changectx' in self.__dict__:
677 678 return self._changectx.rev()
678 679 elif r'_descendantrev' in self.__dict__:
679 680 # this file context was created from a revision with a known
680 681 # descendant, we can (lazily) correct for linkrev aliases
681 682 return self._adjustlinkrev(self._descendantrev)
682 683 else:
683 684 return self._filelog.linkrev(self._filerev)
684 685
685 686 @propertycache
686 687 def _filenode(self):
687 688 if r'_fileid' in self.__dict__:
688 689 return self._filelog.lookup(self._fileid)
689 690 else:
690 691 return self._changectx.filenode(self._path)
691 692
692 693 @propertycache
693 694 def _filerev(self):
694 695 return self._filelog.rev(self._filenode)
695 696
696 697 @propertycache
697 698 def _repopath(self):
698 699 return self._path
699 700
700 701 def __nonzero__(self):
701 702 try:
702 703 self._filenode
703 704 return True
704 705 except error.LookupError:
705 706 # file is missing
706 707 return False
707 708
708 709 __bool__ = __nonzero__
709 710
710 711 def __bytes__(self):
711 712 try:
712 713 return "%s@%s" % (self.path(), self._changectx)
713 714 except error.LookupError:
714 715 return "%s@???" % self.path()
715 716
716 717 __str__ = encoding.strmethod(__bytes__)
717 718
718 719 def __repr__(self):
719 720 return "<%s %s>" % (type(self).__name__, str(self))
720 721
721 722 def __hash__(self):
722 723 try:
723 724 return hash((self._path, self._filenode))
724 725 except AttributeError:
725 726 return id(self)
726 727
727 728 def __eq__(self, other):
728 729 try:
729 730 return (type(self) == type(other) and self._path == other._path
730 731 and self._filenode == other._filenode)
731 732 except AttributeError:
732 733 return False
733 734
734 735 def __ne__(self, other):
735 736 return not (self == other)
736 737
737 738 def filerev(self):
738 739 return self._filerev
739 740 def filenode(self):
740 741 return self._filenode
741 742 @propertycache
742 743 def _flags(self):
743 744 return self._changectx.flags(self._path)
744 745 def flags(self):
745 746 return self._flags
746 747 def filelog(self):
747 748 return self._filelog
748 749 def rev(self):
749 750 return self._changeid
750 751 def linkrev(self):
751 752 return self._filelog.linkrev(self._filerev)
752 753 def node(self):
753 754 return self._changectx.node()
754 755 def hex(self):
755 756 return self._changectx.hex()
756 757 def user(self):
757 758 return self._changectx.user()
758 759 def date(self):
759 760 return self._changectx.date()
760 761 def files(self):
761 762 return self._changectx.files()
762 763 def description(self):
763 764 return self._changectx.description()
764 765 def branch(self):
765 766 return self._changectx.branch()
766 767 def extra(self):
767 768 return self._changectx.extra()
768 769 def phase(self):
769 770 return self._changectx.phase()
770 771 def phasestr(self):
771 772 return self._changectx.phasestr()
772 773 def manifest(self):
773 774 return self._changectx.manifest()
774 775 def changectx(self):
775 776 return self._changectx
776 777 def renamed(self):
777 778 return self._copied
778 779 def repo(self):
779 780 return self._repo
780 781 def size(self):
781 782 return len(self.data())
782 783
783 784 def path(self):
784 785 return self._path
785 786
786 787 def isbinary(self):
787 788 try:
788 789 return util.binary(self.data())
789 790 except IOError:
790 791 return False
791 792 def isexec(self):
792 793 return 'x' in self.flags()
793 794 def islink(self):
794 795 return 'l' in self.flags()
795 796
796 797 def isabsent(self):
797 798 """whether this filectx represents a file not in self._changectx
798 799
799 800 This is mainly for merge code to detect change/delete conflicts. This is
800 801 expected to be True for all subclasses of basectx."""
801 802 return False
802 803
803 804 _customcmp = False
804 805 def cmp(self, fctx):
805 806 """compare with other file context
806 807
807 808 returns True if different than fctx.
808 809 """
809 810 if fctx._customcmp:
810 811 return fctx.cmp(self)
811 812
812 813 if (fctx._filenode is None
813 814 and (self._repo._encodefilterpats
814 815 # if file data starts with '\1\n', empty metadata block is
815 816 # prepended, which adds 4 bytes to filelog.size().
816 817 or self.size() - 4 == fctx.size())
817 818 or self.size() == fctx.size()):
818 819 return self._filelog.cmp(self._filenode, fctx.data())
819 820
820 821 return True
821 822
822 823 def _adjustlinkrev(self, srcrev, inclusive=False):
823 824 """return the first ancestor of <srcrev> introducing <fnode>
824 825
825 826 If the linkrev of the file revision does not point to an ancestor of
826 827 srcrev, we'll walk down the ancestors until we find one introducing
827 828 this file revision.
828 829
829 830 :srcrev: the changeset revision we search ancestors from
830 831 :inclusive: if true, the src revision will also be checked
831 832 """
832 833 repo = self._repo
833 834 cl = repo.unfiltered().changelog
834 835 mfl = repo.manifestlog
835 836 # fetch the linkrev
836 837 lkr = self.linkrev()
837 838 # hack to reuse ancestor computation when searching for renames
838 839 memberanc = getattr(self, '_ancestrycontext', None)
839 840 iteranc = None
840 841 if srcrev is None:
841 842 # wctx case, used by workingfilectx during mergecopy
842 843 revs = [p.rev() for p in self._repo[None].parents()]
843 844 inclusive = True # we skipped the real (revless) source
844 845 else:
845 846 revs = [srcrev]
846 847 if memberanc is None:
847 848 memberanc = iteranc = cl.ancestors(revs, lkr,
848 849 inclusive=inclusive)
849 850 # check if this linkrev is an ancestor of srcrev
850 851 if lkr not in memberanc:
851 852 if iteranc is None:
852 853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
853 854 fnode = self._filenode
854 855 path = self._path
855 856 for a in iteranc:
856 857 ac = cl.read(a) # get changeset data (we avoid object creation)
857 858 if path in ac[3]: # checking the 'files' field.
858 859 # The file has been touched, check if the content is
859 860 # similar to the one we search for.
860 861 if fnode == mfl[ac[0]].readfast().get(path):
861 862 return a
862 863 # In theory, we should never get out of that loop without a result.
863 864 # But if the manifest uses a buggy file revision (not a child of
864 865 # the one it replaces) we could. Such a buggy situation will likely
865 866 # result in a crash somewhere else at some point.
866 867 return lkr
867 868
868 869 def introrev(self):
869 870 """return the rev of the changeset which introduced this file revision
870 871
871 872 This method is different from linkrev because it takes into account the
872 873 changeset the filectx was created from. It ensures the returned
873 874 revision is one of its ancestors. This prevents bugs from
874 875 'linkrev-shadowing' when a file revision is used by multiple
875 876 changesets.
876 877 """
877 878 lkr = self.linkrev()
878 879 attrs = vars(self)
879 880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
880 881 if noctx or self.rev() == lkr:
881 882 return self.linkrev()
882 883 return self._adjustlinkrev(self.rev(), inclusive=True)
883 884
884 885 def _parentfilectx(self, path, fileid, filelog):
885 886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
886 887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
887 888 if '_changeid' in vars(self) or '_changectx' in vars(self):
888 889 # If self is associated with a changeset (probably explicitly
889 890 # fed), ensure the created filectx is associated with a
890 891 # changeset that is an ancestor of self.changectx.
891 892 # This lets us later use _adjustlinkrev to get a correct link.
892 893 fctx._descendantrev = self.rev()
893 894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 895 elif '_descendantrev' in vars(self):
895 896 # Otherwise propagate _descendantrev if we have one associated.
896 897 fctx._descendantrev = self._descendantrev
897 898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
898 899 return fctx
899 900
900 901 def parents(self):
901 902 _path = self._path
902 903 fl = self._filelog
903 904 parents = self._filelog.parents(self._filenode)
904 905 pl = [(_path, node, fl) for node in parents if node != nullid]
905 906
906 907 r = fl.renamed(self._filenode)
907 908 if r:
908 909 # - In the simple rename case, both parents are nullid, pl is empty.
909 910 # - In case of merge, only one of the parents is nullid and should
910 911 # be replaced with the rename information. This parent is -always-
911 912 # the first one.
912 913 #
913 914 # As nullid parents have always been filtered out in the previous
914 915 # list comprehension, inserting at 0 will always replace the first
915 916 # nullid parent with the rename information.
916 917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
917 918
918 919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
919 920
920 921 def p1(self):
921 922 return self.parents()[0]
922 923
923 924 def p2(self):
924 925 p = self.parents()
925 926 if len(p) == 2:
926 927 return p[1]
927 928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
928 929
929 930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
930 931 diffopts=None):
931 932 '''returns a list of tuples of ((ctx, number), line) for each line
932 933 in the file, where ctx is the filectx of the node where
933 934 that line was last changed; if linenumber parameter is true, number is
934 935 the line number at the first appearance in the managed file, otherwise,
935 936 number has a fixed value of False.
936 937 '''
937 938
938 939 def lines(text):
939 940 if text.endswith("\n"):
940 941 return text.count("\n")
941 942 return text.count("\n") + int(bool(text))
942 943
943 944 if linenumber:
944 945 def decorate(text, rev):
945 946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
946 947 else:
947 948 def decorate(text, rev):
948 949 return ([(rev, False)] * lines(text), text)
949 950
950 951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951 952
952 953 def parents(f):
953 954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 957 # isn't an ancestor of the srcrev.
957 958 f._changeid
958 959 pl = f.parents()
959 960
960 961 # Don't return renamed parents if we aren't following.
961 962 if not follow:
962 963 pl = [p for p in pl if p.path() == f.path()]
963 964
964 965 # renamed filectx won't have a filelog yet, so set it
965 966 # from the cache to save time
966 967 for p in pl:
967 968 if not '_filelog' in p.__dict__:
968 969 p._filelog = getlog(p.path())
969 970
970 971 return pl
971 972
972 973 # use linkrev to find the first changeset where self appeared
973 974 base = self
974 975 introrev = self.introrev()
975 976 if self.rev() != introrev:
976 977 base = self.filectx(self.filenode(), changeid=introrev)
977 978 if getattr(base, '_ancestrycontext', None) is None:
978 979 cl = self._repo.changelog
979 980 if introrev is None:
980 981 # wctx is not inclusive, but works because _ancestrycontext
981 982 # is used to test filelog revisions
982 983 ac = cl.ancestors([p.rev() for p in base.parents()],
983 984 inclusive=True)
984 985 else:
985 986 ac = cl.ancestors([introrev], inclusive=True)
986 987 base._ancestrycontext = ac
987 988
988 989 # This algorithm would prefer to be recursive, but Python is a
989 990 # bit recursion-hostile. Instead we do an iterative
990 991 # depth-first search.
991 992
992 993 # 1st DFS pre-calculates pcache and needed
993 994 visit = [base]
994 995 pcache = {}
995 996 needed = {base: 1}
996 997 while visit:
997 998 f = visit.pop()
998 999 if f in pcache:
999 1000 continue
1000 1001 pl = parents(f)
1001 1002 pcache[f] = pl
1002 1003 for p in pl:
1003 1004 needed[p] = needed.get(p, 0) + 1
1004 1005 if p not in pcache:
1005 1006 visit.append(p)
1006 1007
1007 1008 # 2nd DFS does the actual annotate
1008 1009 visit[:] = [base]
1009 1010 hist = {}
1010 1011 while visit:
1011 1012 f = visit[-1]
1012 1013 if f in hist:
1013 1014 visit.pop()
1014 1015 continue
1015 1016
1016 1017 ready = True
1017 1018 pl = pcache[f]
1018 1019 for p in pl:
1019 1020 if p not in hist:
1020 1021 ready = False
1021 1022 visit.append(p)
1022 1023 if ready:
1023 1024 visit.pop()
1024 1025 curr = decorate(f.data(), f)
1025 1026 skipchild = False
1026 1027 if skiprevs is not None:
1027 1028 skipchild = f._changeid in skiprevs
1028 1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1029 1030 diffopts)
1030 1031 for p in pl:
1031 1032 if needed[p] == 1:
1032 1033 del hist[p]
1033 1034 del needed[p]
1034 1035 else:
1035 1036 needed[p] -= 1
1036 1037
1037 1038 hist[f] = curr
1038 1039 del pcache[f]
1039 1040
1040 1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 1042
1042 1043 def ancestors(self, followfirst=False):
1043 1044 visit = {}
1044 1045 c = self
1045 1046 if followfirst:
1046 1047 cut = 1
1047 1048 else:
1048 1049 cut = None
1049 1050
1050 1051 while True:
1051 1052 for parent in c.parents()[:cut]:
1052 1053 visit[(parent.linkrev(), parent.filenode())] = parent
1053 1054 if not visit:
1054 1055 break
1055 1056 c = visit.pop(max(visit))
1056 1057 yield c
1057 1058
1058 1059 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1059 1060 r'''
1060 1061 Given parent and child fctxes and annotate data for parents, for all lines
1061 1062 in either parent that match the child, annotate the child with the parent's
1062 1063 data.
1063 1064
1064 1065 Additionally, if `skipchild` is True, replace all other lines with parent
1065 1066 annotate data as well such that child is never blamed for any lines.
1066 1067
1067 1068 >>> oldfctx = 'old'
1068 1069 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1069 1070 >>> olddata = 'a\nb\n'
1070 1071 >>> p1data = 'a\nb\nc\n'
1071 1072 >>> p2data = 'a\nc\nd\n'
1072 1073 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1073 1074 >>> diffopts = mdiff.diffopts()
1074 1075
1075 1076 >>> def decorate(text, rev):
1076 1077 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1077 1078
1078 1079 Basic usage:
1079 1080
1080 1081 >>> oldann = decorate(olddata, oldfctx)
1081 1082 >>> p1ann = decorate(p1data, p1fctx)
1082 1083 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1083 1084 >>> p1ann[0]
1084 1085 [('old', 1), ('old', 2), ('p1', 3)]
1085 1086 >>> p2ann = decorate(p2data, p2fctx)
1086 1087 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1087 1088 >>> p2ann[0]
1088 1089 [('old', 1), ('p2', 2), ('p2', 3)]
1089 1090
1090 1091 Test with multiple parents (note the difference caused by ordering):
1091 1092
1092 1093 >>> childann = decorate(childdata, childfctx)
1093 1094 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1094 1095 ... diffopts)
1095 1096 >>> childann[0]
1096 1097 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1097 1098
1098 1099 >>> childann = decorate(childdata, childfctx)
1099 1100 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1100 1101 ... diffopts)
1101 1102 >>> childann[0]
1102 1103 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1103 1104
1104 1105 Test with skipchild (note the difference caused by ordering):
1105 1106
1106 1107 >>> childann = decorate(childdata, childfctx)
1107 1108 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1108 1109 ... diffopts)
1109 1110 >>> childann[0]
1110 1111 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1111 1112
1112 1113 >>> childann = decorate(childdata, childfctx)
1113 1114 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1114 1115 ... diffopts)
1115 1116 >>> childann[0]
1116 1117 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1117 1118 '''
1118 1119 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1119 1120 for parent in parents]
1120 1121
1121 1122 if skipchild:
1122 1123 # Need to iterate over the blocks twice -- make it a list
1123 1124 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1124 1125 # Mercurial currently prefers p2 over p1 for annotate.
1125 1126 # TODO: change this?
1126 1127 for parent, blocks in pblocks:
1127 1128 for (a1, a2, b1, b2), t in blocks:
1128 1129 # Changed blocks ('!') or blocks made only of blank lines ('~')
1129 1130 # belong to the child.
1130 1131 if t == '=':
1131 1132 child[0][b1:b2] = parent[0][a1:a2]
1132 1133
1133 1134 if skipchild:
1134 1135 # Now try and match up anything that couldn't be matched,
1135 1136 # Reversing pblocks maintains bias towards p2, matching above
1136 1137 # behavior.
1137 1138 pblocks.reverse()
1138 1139
1139 1140 # The heuristics are:
1140 1141 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1141 1142 # This could potentially be smarter but works well enough.
1142 1143 # * For a non-matching section, do a best-effort fit. Match lines in
1143 1144 # diff hunks 1:1, dropping lines as necessary.
1144 1145 # * Repeat the last line as a last resort.
1145 1146
1146 1147 # First, replace as much as possible without repeating the last line.
1147 1148 remaining = [(parent, []) for parent, _blocks in pblocks]
1148 1149 for idx, (parent, blocks) in enumerate(pblocks):
1149 1150 for (a1, a2, b1, b2), _t in blocks:
1150 1151 if a2 - a1 >= b2 - b1:
1151 1152 for bk in xrange(b1, b2):
1152 1153 if child[0][bk][0] == childfctx:
1153 1154 ak = min(a1 + (bk - b1), a2 - 1)
1154 1155 child[0][bk] = parent[0][ak]
1155 1156 else:
1156 1157 remaining[idx][1].append((a1, a2, b1, b2))
1157 1158
1158 1159 # Then, look at anything left, which might involve repeating the last
1159 1160 # line.
1160 1161 for parent, blocks in remaining:
1161 1162 for a1, a2, b1, b2 in blocks:
1162 1163 for bk in xrange(b1, b2):
1163 1164 if child[0][bk][0] == childfctx:
1164 1165 ak = min(a1 + (bk - b1), a2 - 1)
1165 1166 child[0][bk] = parent[0][ak]
1166 1167 return child
1167 1168
1168 1169 class filectx(basefilectx):
1169 1170 """A filecontext object makes access to data related to a particular
1170 1171 filerevision convenient."""
1171 1172 def __init__(self, repo, path, changeid=None, fileid=None,
1172 1173 filelog=None, changectx=None):
1173 1174 """changeid can be a changeset revision, node, or tag.
1174 1175 fileid can be a file revision or node."""
1175 1176 self._repo = repo
1176 1177 self._path = path
1177 1178
1178 1179 assert (changeid is not None
1179 1180 or fileid is not None
1180 1181 or changectx is not None), \
1181 1182 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1182 1183 % (changeid, fileid, changectx))
1183 1184
1184 1185 if filelog is not None:
1185 1186 self._filelog = filelog
1186 1187
1187 1188 if changeid is not None:
1188 1189 self._changeid = changeid
1189 1190 if changectx is not None:
1190 1191 self._changectx = changectx
1191 1192 if fileid is not None:
1192 1193 self._fileid = fileid
1193 1194
1194 1195 @propertycache
1195 1196 def _changectx(self):
1196 1197 try:
1197 1198 return changectx(self._repo, self._changeid)
1198 1199 except error.FilteredRepoLookupError:
1199 1200 # Linkrev may point to any revision in the repository. When the
1200 1201 # repository is filtered this may lead to `filectx` trying to build
1201 1202 # `changectx` for a filtered revision. In such a case we fall back
1202 1203 # to creating `changectx` on the unfiltered repository.
1203 1204 # This fallback should not be an issue because `changectx` from
1204 1205 # `filectx` are not used in complex operations that care about
1205 1206 # filtering.
1206 1207 #
1207 1208 # This fallback is a cheap and dirty fix that prevents several
1208 1209 # crashes. It does not ensure the behavior is correct. However the
1209 1210 # behavior was not correct before filtering either and "incorrect
1210 1211 # behavior" is seen as better than "crash"
1211 1212 #
1212 1213 # Linkrevs have several serious troubles with filtering that are
1213 1214 # complicated to solve. Proper handling of the issue here should be
1214 1215 # considered when solving the linkrev issues is on the table.
1215 1216 return changectx(self._repo.unfiltered(), self._changeid)
1216 1217
1217 1218 def filectx(self, fileid, changeid=None):
1218 1219 '''opens an arbitrary revision of the file without
1219 1220 opening a new filelog'''
1220 1221 return filectx(self._repo, self._path, fileid=fileid,
1221 1222 filelog=self._filelog, changeid=changeid)
1222 1223
1223 1224 def rawdata(self):
1224 1225 return self._filelog.revision(self._filenode, raw=True)
1225 1226
1226 1227 def rawflags(self):
1227 1228 """low-level revlog flags"""
1228 1229 return self._filelog.flags(self._filerev)
1229 1230
1230 1231 def data(self):
1231 1232 try:
1232 1233 return self._filelog.read(self._filenode)
1233 1234 except error.CensoredNodeError:
1234 1235 if self._repo.ui.config("censor", "policy") == "ignore":
1235 1236 return ""
1236 1237 raise error.Abort(_("censored node: %s") % short(self._filenode),
1237 1238 hint=_("set censor.policy to ignore errors"))
1238 1239
1239 1240 def size(self):
1240 1241 return self._filelog.size(self._filerev)
1241 1242
1242 1243 @propertycache
1243 1244 def _copied(self):
1244 1245 """check if file was actually renamed in this changeset revision
1245 1246
1246 1247 If a rename is logged in the file revision, we report the copy for the
1247 1248 changeset only if the file revision's linkrev points back to the changeset
1248 1249 in question or both changeset parents contain different file revisions.
1249 1250 """
1250 1251
1251 1252 renamed = self._filelog.renamed(self._filenode)
1252 1253 if not renamed:
1253 1254 return renamed
1254 1255
1255 1256 if self.rev() == self.linkrev():
1256 1257 return renamed
1257 1258
1258 1259 name = self.path()
1259 1260 fnode = self._filenode
1260 1261 for p in self._changectx.parents():
1261 1262 try:
1262 1263 if fnode == p.filenode(name):
1263 1264 return None
1264 1265 except error.LookupError:
1265 1266 pass
1266 1267 return renamed
1267 1268
1268 1269 def children(self):
1269 1270 # hard for renames
1270 1271 c = self._filelog.children(self._filenode)
1271 1272 return [filectx(self._repo, self._path, fileid=x,
1272 1273 filelog=self._filelog) for x in c]
1273 1274
1274 1275 class committablectx(basectx):
1275 1276 """A committablectx object provides common functionality for a context that
1276 1277 wants the ability to commit, e.g. workingctx or memctx."""
1277 1278 def __init__(self, repo, text="", user=None, date=None, extra=None,
1278 1279 changes=None):
1279 1280 self._repo = repo
1280 1281 self._rev = None
1281 1282 self._node = None
1282 1283 self._text = text
1283 1284 if date:
1284 1285 self._date = util.parsedate(date)
1285 1286 if user:
1286 1287 self._user = user
1287 1288 if changes:
1288 1289 self._status = changes
1289 1290
1290 1291 self._extra = {}
1291 1292 if extra:
1292 1293 self._extra = extra.copy()
1293 1294 if 'branch' not in self._extra:
1294 1295 try:
1295 1296 branch = encoding.fromlocal(self._repo.dirstate.branch())
1296 1297 except UnicodeDecodeError:
1297 1298 raise error.Abort(_('branch name not in UTF-8!'))
1298 1299 self._extra['branch'] = branch
1299 1300 if self._extra['branch'] == '':
1300 1301 self._extra['branch'] = 'default'
1301 1302
1302 1303 def __bytes__(self):
1303 1304 return bytes(self._parents[0]) + "+"
1304 1305
1305 1306 __str__ = encoding.strmethod(__bytes__)
1306 1307
1307 1308 def __nonzero__(self):
1308 1309 return True
1309 1310
1310 1311 __bool__ = __nonzero__
1311 1312
1312 1313 def _buildflagfunc(self):
1313 1314 # Create a fallback function for getting file flags when the
1314 1315 # filesystem doesn't support them
1315 1316
1316 1317 copiesget = self._repo.dirstate.copies().get
1317 1318 parents = self.parents()
1318 1319 if len(parents) < 2:
1319 1320 # when we have one parent, it's easy: copy from parent
1320 1321 man = parents[0].manifest()
1321 1322 def func(f):
1322 1323 f = copiesget(f, f)
1323 1324 return man.flags(f)
1324 1325 else:
1325 1326 # merges are tricky: we try to reconstruct the unstored
1326 1327 # result from the merge (issue1802)
1327 1328 p1, p2 = parents
1328 1329 pa = p1.ancestor(p2)
1329 1330 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1330 1331
1331 1332 def func(f):
1332 1333 f = copiesget(f, f) # may be wrong for merges with copies
1333 1334 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1334 1335 if fl1 == fl2:
1335 1336 return fl1
1336 1337 if fl1 == fla:
1337 1338 return fl2
1338 1339 if fl2 == fla:
1339 1340 return fl1
1340 1341 return '' # punt for conflicts
1341 1342
1342 1343 return func
1343 1344
1344 1345 @propertycache
1345 1346 def _flagfunc(self):
1346 1347 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1347 1348
1348 1349 @propertycache
1349 1350 def _status(self):
1350 1351 return self._repo.status()
1351 1352
1352 1353 @propertycache
1353 1354 def _user(self):
1354 1355 return self._repo.ui.username()
1355 1356
1356 1357 @propertycache
1357 1358 def _date(self):
1358 1359 ui = self._repo.ui
1359 1360 date = ui.configdate('devel', 'default-date')
1360 1361 if date is None:
1361 1362 date = util.makedate()
1362 1363 return date
1363 1364
1364 1365 def subrev(self, subpath):
1365 1366 return None
1366 1367
1367 1368 def manifestnode(self):
1368 1369 return None
1369 1370 def user(self):
1370 1371 return self._user or self._repo.ui.username()
1371 1372 def date(self):
1372 1373 return self._date
1373 1374 def description(self):
1374 1375 return self._text
1375 1376 def files(self):
1376 1377 return sorted(self._status.modified + self._status.added +
1377 1378 self._status.removed)
1378 1379
1379 1380 def modified(self):
1380 1381 return self._status.modified
1381 1382 def added(self):
1382 1383 return self._status.added
1383 1384 def removed(self):
1384 1385 return self._status.removed
1385 1386 def deleted(self):
1386 1387 return self._status.deleted
1387 1388 def branch(self):
1388 1389 return encoding.tolocal(self._extra['branch'])
1389 1390 def closesbranch(self):
1390 1391 return 'close' in self._extra
1391 1392 def extra(self):
1392 1393 return self._extra
1393 1394
1394 1395 def tags(self):
1395 1396 return []
1396 1397
1397 1398 def bookmarks(self):
1398 1399 b = []
1399 1400 for p in self.parents():
1400 1401 b.extend(p.bookmarks())
1401 1402 return b
1402 1403
1403 1404 def phase(self):
1404 1405 phase = phases.draft # default phase to draft
1405 1406 for p in self.parents():
1406 1407 phase = max(phase, p.phase())
1407 1408 return phase
1408 1409
1409 1410 def hidden(self):
1410 1411 return False
1411 1412
1412 1413 def children(self):
1413 1414 return []
1414 1415
1415 1416 def flags(self, path):
1416 1417 if r'_manifest' in self.__dict__:
1417 1418 try:
1418 1419 return self._manifest.flags(path)
1419 1420 except KeyError:
1420 1421 return ''
1421 1422
1422 1423 try:
1423 1424 return self._flagfunc(path)
1424 1425 except OSError:
1425 1426 return ''
1426 1427
1427 1428 def ancestor(self, c2):
1428 1429 """return the "best" ancestor context of self and c2"""
1429 1430 return self._parents[0].ancestor(c2) # punt on two parents for now
1430 1431
1431 1432 def walk(self, match):
1432 1433 '''Generates matching file names.'''
1433 1434 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1434 1435 True, False))
1435 1436
1436 1437 def matches(self, match):
1437 1438 return sorted(self._repo.dirstate.matches(match))
1438 1439
1439 1440 def ancestors(self):
1440 1441 for p in self._parents:
1441 1442 yield p
1442 1443 for a in self._repo.changelog.ancestors(
1443 1444 [p.rev() for p in self._parents]):
1444 1445 yield changectx(self._repo, a)
1445 1446
1446 1447 def markcommitted(self, node):
1447 1448 """Perform post-commit cleanup necessary after committing this ctx
1448 1449
1449 1450 Specifically, this updates backing stores this working context
1450 1451 wraps to reflect the fact that the changes reflected by this
1451 1452 workingctx have been committed. For example, it marks
1452 1453 modified and added files as normal in the dirstate.
1453 1454
1454 1455 """
1455 1456
1456 1457 with self._repo.dirstate.parentchange():
1457 1458 for f in self.modified() + self.added():
1458 1459 self._repo.dirstate.normal(f)
1459 1460 for f in self.removed():
1460 1461 self._repo.dirstate.drop(f)
1461 1462 self._repo.dirstate.setparents(node)
1462 1463
1463 1464 # write changes out explicitly, because nesting wlock at
1464 1465 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1465 1466 # from immediately doing so for subsequent changing files
1466 1467 self._repo.dirstate.write(self._repo.currenttransaction())
1467 1468
1468 1469 def dirty(self, missing=False, merge=True, branch=True):
1469 1470 return False
1470 1471
1471 1472 class workingctx(committablectx):
1472 1473 """A workingctx object makes access to data related to
1473 1474 the current working directory convenient.
1474 1475 date - any valid date string or (unixtime, offset), or None.
1475 1476 user - username string, or None.
1476 1477 extra - a dictionary of extra values, or None.
1477 1478 changes - a list of file lists as returned by localrepo.status()
1478 1479 or None to use the repository status.
1479 1480 """
1480 1481 def __init__(self, repo, text="", user=None, date=None, extra=None,
1481 1482 changes=None):
1482 1483 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1483 1484
1484 1485 def __iter__(self):
1485 1486 d = self._repo.dirstate
1486 1487 for f in d:
1487 1488 if d[f] != 'r':
1488 1489 yield f
1489 1490
1490 1491 def __contains__(self, key):
1491 1492 return self._repo.dirstate[key] not in "?r"
1492 1493
1493 1494 def hex(self):
1494 1495 return hex(wdirid)
1495 1496
1496 1497 @propertycache
1497 1498 def _parents(self):
1498 1499 p = self._repo.dirstate.parents()
1499 1500 if p[1] == nullid:
1500 1501 p = p[:-1]
1501 1502 return [changectx(self._repo, x) for x in p]
1502 1503
1503 1504 def filectx(self, path, filelog=None):
1504 1505 """get a file context from the working directory"""
1505 1506 return workingfilectx(self._repo, path, workingctx=self,
1506 1507 filelog=filelog)
1507 1508
1508 1509 def dirty(self, missing=False, merge=True, branch=True):
1509 1510 "check whether a working directory is modified"
1510 1511 # check subrepos first
1511 1512 for s in sorted(self.substate):
1512 1513 if self.sub(s).dirty(missing=missing):
1513 1514 return True
1514 1515 # check current working dir
1515 1516 return ((merge and self.p2()) or
1516 1517 (branch and self.branch() != self.p1().branch()) or
1517 1518 self.modified() or self.added() or self.removed() or
1518 1519 (missing and self.deleted()))
1519 1520
1520 1521 def add(self, list, prefix=""):
1521 join = lambda f: os.path.join(prefix, f)
1522 1522 with self._repo.wlock():
1523 1523 ui, ds = self._repo.ui, self._repo.dirstate
1524 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1524 1525 rejected = []
1525 1526 lstat = self._repo.wvfs.lstat
1526 1527 for f in list:
1527 scmutil.checkportable(ui, join(f))
1528 # ds.pathto() returns an absolute file when this is invoked from
1529 # the keyword extension. That gets flagged as non-portable on
1530 # Windows, since it contains the drive letter and colon.
1531 scmutil.checkportable(ui, os.path.join(prefix, f))
1528 1532 try:
1529 1533 st = lstat(f)
1530 1534 except OSError:
1531 ui.warn(_("%s does not exist!\n") % join(f))
1535 ui.warn(_("%s does not exist!\n") % uipath(f))
1532 1536 rejected.append(f)
1533 1537 continue
1534 1538 if st.st_size > 10000000:
1535 1539 ui.warn(_("%s: up to %d MB of RAM may be required "
1536 1540 "to manage this file\n"
1537 1541 "(use 'hg revert %s' to cancel the "
1538 1542 "pending addition)\n")
1539 % (f, 3 * st.st_size // 1000000, join(f)))
1543 % (f, 3 * st.st_size // 1000000, uipath(f)))
1540 1544 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1541 1545 ui.warn(_("%s not added: only files and symlinks "
1542 "supported currently\n") % join(f))
1546 "supported currently\n") % uipath(f))
1543 1547 rejected.append(f)
1544 1548 elif ds[f] in 'amn':
1545 ui.warn(_("%s already tracked!\n") % join(f))
1549 ui.warn(_("%s already tracked!\n") % uipath(f))
1546 1550 elif ds[f] == 'r':
1547 1551 ds.normallookup(f)
1548 1552 else:
1549 1553 ds.add(f)
1550 1554 return rejected
1551 1555
1552 1556 def forget(self, files, prefix=""):
1553 join = lambda f: os.path.join(prefix, f)
1554 1557 with self._repo.wlock():
1558 ds = self._repo.dirstate
1559 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1555 1560 rejected = []
1556 1561 for f in files:
1557 1562 if f not in self._repo.dirstate:
1558 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1563 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1559 1564 rejected.append(f)
1560 1565 elif self._repo.dirstate[f] != 'a':
1561 1566 self._repo.dirstate.remove(f)
1562 1567 else:
1563 1568 self._repo.dirstate.drop(f)
1564 1569 return rejected
1565 1570
1566 1571 def undelete(self, list):
1567 1572 pctxs = self.parents()
1568 1573 with self._repo.wlock():
1574 ds = self._repo.dirstate
1569 1575 for f in list:
1570 1576 if self._repo.dirstate[f] != 'r':
1571 self._repo.ui.warn(_("%s not removed!\n") % f)
1577 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1572 1578 else:
1573 1579 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1574 1580 t = fctx.data()
1575 1581 self._repo.wwrite(f, t, fctx.flags())
1576 1582 self._repo.dirstate.normal(f)
1577 1583
1578 1584 def copy(self, source, dest):
1579 1585 try:
1580 1586 st = self._repo.wvfs.lstat(dest)
1581 1587 except OSError as err:
1582 1588 if err.errno != errno.ENOENT:
1583 1589 raise
1584 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1590 self._repo.ui.warn(_("%s does not exist!\n")
1591 % self._repo.dirstate.pathto(dest))
1585 1592 return
1586 1593 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1587 1594 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1588 "symbolic link\n") % dest)
1595 "symbolic link\n")
1596 % self._repo.dirstate.pathto(dest))
1589 1597 else:
1590 1598 with self._repo.wlock():
1591 1599 if self._repo.dirstate[dest] in '?':
1592 1600 self._repo.dirstate.add(dest)
1593 1601 elif self._repo.dirstate[dest] in 'r':
1594 1602 self._repo.dirstate.normallookup(dest)
1595 1603 self._repo.dirstate.copy(source, dest)
1596 1604
1597 1605 def match(self, pats=None, include=None, exclude=None, default='glob',
1598 1606 listsubrepos=False, badfn=None):
1599 1607 r = self._repo
1600 1608
1601 1609 # Only a case insensitive filesystem needs magic to translate user input
1602 1610 # to actual case in the filesystem.
1603 1611 icasefs = not util.fscasesensitive(r.root)
1604 1612 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1605 1613 default, auditor=r.auditor, ctx=self,
1606 1614 listsubrepos=listsubrepos, badfn=badfn,
1607 1615 icasefs=icasefs)
1608 1616
1609 1617 def _filtersuspectsymlink(self, files):
1610 1618 if not files or self._repo.dirstate._checklink:
1611 1619 return files
1612 1620
1613 1621 # Symlink placeholders may get non-symlink-like contents
1614 1622 # via user error or dereferencing by NFS or Samba servers,
1615 1623 # so we filter out any placeholders that don't look like a
1616 1624 # symlink
1617 1625 sane = []
1618 1626 for f in files:
1619 1627 if self.flags(f) == 'l':
1620 1628 d = self[f].data()
1621 1629 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1622 1630 self._repo.ui.debug('ignoring suspect symlink placeholder'
1623 1631 ' "%s"\n' % f)
1624 1632 continue
1625 1633 sane.append(f)
1626 1634 return sane
1627 1635
1628 1636 def _checklookup(self, files):
1629 1637 # check for any possibly clean files
1630 1638 if not files:
1631 1639 return [], [], []
1632 1640
1633 1641 modified = []
1634 1642 deleted = []
1635 1643 fixup = []
1636 1644 pctx = self._parents[0]
1637 1645 # do a full compare of any files that might have changed
1638 1646 for f in sorted(files):
1639 1647 try:
1640 1648 # This will return True for a file that got replaced by a
1641 1649 # directory in the interim, but fixing that is pretty hard.
1642 1650 if (f not in pctx or self.flags(f) != pctx.flags(f)
1643 1651 or pctx[f].cmp(self[f])):
1644 1652 modified.append(f)
1645 1653 else:
1646 1654 fixup.append(f)
1647 1655 except (IOError, OSError):
1648 1656 # A file became inaccessible in between? Mark it as deleted,
1649 1657 # matching dirstate behavior (issue5584).
1650 1658 # The dirstate has more complex behavior around whether a
1651 1659 # missing file matches a directory, etc, but we don't need to
1652 1660 # bother with that: if f has made it to this point, we're sure
1653 1661 # it's in the dirstate.
1654 1662 deleted.append(f)
1655 1663
1656 1664 return modified, deleted, fixup
1657 1665
1658 1666 def _poststatusfixup(self, status, fixup):
1659 1667 """update dirstate for files that are actually clean"""
1660 1668 poststatus = self._repo.postdsstatus()
1661 1669 if fixup or poststatus:
1662 1670 try:
1663 1671 oldid = self._repo.dirstate.identity()
1664 1672
1665 1673 # updating the dirstate is optional
1666 1674 # so we don't wait on the lock
1667 1675 # wlock can invalidate the dirstate, so cache normal _after_
1668 1676 # taking the lock
1669 1677 with self._repo.wlock(False):
1670 1678 if self._repo.dirstate.identity() == oldid:
1671 1679 if fixup:
1672 1680 normal = self._repo.dirstate.normal
1673 1681 for f in fixup:
1674 1682 normal(f)
1675 1683 # write changes out explicitly, because nesting
1676 1684 # wlock at runtime may prevent 'wlock.release()'
1677 1685 # after this block from doing so for subsequent
1678 1686 # changing files
1679 1687 tr = self._repo.currenttransaction()
1680 1688 self._repo.dirstate.write(tr)
1681 1689
1682 1690 if poststatus:
1683 1691 for ps in poststatus:
1684 1692 ps(self, status)
1685 1693 else:
1686 1694 # in this case, writing changes out breaks
1687 1695 # consistency, because .hg/dirstate was
1688 1696 # already changed simultaneously after last
1689 1697 # caching (see also issue5584 for detail)
1690 1698 self._repo.ui.debug('skip updating dirstate: '
1691 1699 'identity mismatch\n')
1692 1700 except error.LockError:
1693 1701 pass
1694 1702 finally:
1695 1703 # Even if the wlock couldn't be grabbed, clear out the list.
1696 1704 self._repo.clearpostdsstatus()
1697 1705
1698 1706 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1699 1707 unknown=False):
1700 1708 '''Gets the status from the dirstate -- internal use only.'''
1701 1709 listignored, listclean, listunknown = ignored, clean, unknown
1702 1710 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1703 1711 subrepos = []
1704 1712 if '.hgsub' in self:
1705 1713 subrepos = sorted(self.substate)
1706 1714 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1707 1715 listclean, listunknown)
1708 1716
1709 1717 # check for any possibly clean files
1710 1718 fixup = []
1711 1719 if cmp:
1712 1720 modified2, deleted2, fixup = self._checklookup(cmp)
1713 1721 s.modified.extend(modified2)
1714 1722 s.deleted.extend(deleted2)
1715 1723
1716 1724 if fixup and listclean:
1717 1725 s.clean.extend(fixup)
1718 1726
1719 1727 self._poststatusfixup(s, fixup)
1720 1728
1721 1729 if match.always():
1722 1730 # cache for performance
1723 1731 if s.unknown or s.ignored or s.clean:
1724 1732 # "_status" is cached with list*=False in the normal route
1725 1733 self._status = scmutil.status(s.modified, s.added, s.removed,
1726 1734 s.deleted, [], [], [])
1727 1735 else:
1728 1736 self._status = s
1729 1737
1730 1738 return s
1731 1739
1732 1740 @propertycache
1733 1741 def _manifest(self):
1734 1742 """generate a manifest corresponding to the values in self._status
1735 1743
1736 1744 This reuses the file nodeid from the parent, but we use special node
1737 1745 identifiers for added and modified files. This is used by manifest
1738 1746 merges to see that files are different and by update logic to avoid
1739 1747 deleting newly added files.
1740 1748 """
1741 1749 return self._buildstatusmanifest(self._status)
1742 1750
1743 1751 def _buildstatusmanifest(self, status):
1744 1752 """Builds a manifest that includes the given status results."""
1745 1753 parents = self.parents()
1746 1754
1747 1755 man = parents[0].manifest().copy()
1748 1756
1749 1757 ff = self._flagfunc
1750 1758 for i, l in ((addednodeid, status.added),
1751 1759 (modifiednodeid, status.modified)):
1752 1760 for f in l:
1753 1761 man[f] = i
1754 1762 try:
1755 1763 man.setflag(f, ff(f))
1756 1764 except OSError:
1757 1765 pass
1758 1766
1759 1767 for f in status.deleted + status.removed:
1760 1768 if f in man:
1761 1769 del man[f]
1762 1770
1763 1771 return man
1764 1772
1765 1773 def _buildstatus(self, other, s, match, listignored, listclean,
1766 1774 listunknown):
1767 1775 """build a status with respect to another context
1768 1776
1769 1777 This includes logic for maintaining the fast path of status when
1770 1778 comparing the working directory against its parent, which is to skip
1771 1779 building a new manifest if self (working directory) is not comparing
1772 1780 against its parent (repo['.']).
1773 1781 """
1774 1782 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1775 1783 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1776 1784 # might have accidentally ended up with the entire contents of the file
1777 1785 # they are supposed to be linking to.
1778 1786 s.modified[:] = self._filtersuspectsymlink(s.modified)
1779 1787 if other != self._repo['.']:
1780 1788 s = super(workingctx, self)._buildstatus(other, s, match,
1781 1789 listignored, listclean,
1782 1790 listunknown)
1783 1791 return s
1784 1792
1785 1793 def _matchstatus(self, other, match):
1786 1794 """override the match method with a filter for directory patterns
1787 1795
1788 1796 We use inheritance to customize the match.bad method only in cases of
1789 1797 workingctx since it belongs only to the working directory when
1790 1798 comparing against the parent changeset.
1791 1799
1792 1800 If we aren't comparing against the working directory's parent, then we
1793 1801 just use the default match object sent to us.
1794 1802 """
1795 1803 superself = super(workingctx, self)
1796 1804 match = superself._matchstatus(other, match)
1797 1805 if other != self._repo['.']:
1798 1806 def bad(f, msg):
1799 1807 # 'f' may be a directory pattern from 'match.files()',
1800 1808 # so 'f not in ctx1' is not enough
1801 1809 if f not in other and not other.hasdir(f):
1802 1810 self._repo.ui.warn('%s: %s\n' %
1803 1811 (self._repo.dirstate.pathto(f), msg))
1804 1812 match.bad = bad
1805 1813 return match
1806 1814
1807 1815 def markcommitted(self, node):
1808 1816 super(workingctx, self).markcommitted(node)
1809 1817
1810 1818 sparse.aftercommit(self._repo, node)
1811 1819
1812 1820 class committablefilectx(basefilectx):
1813 1821 """A committablefilectx provides common functionality for a file context
1814 1822 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1815 1823 def __init__(self, repo, path, filelog=None, ctx=None):
1816 1824 self._repo = repo
1817 1825 self._path = path
1818 1826 self._changeid = None
1819 1827 self._filerev = self._filenode = None
1820 1828
1821 1829 if filelog is not None:
1822 1830 self._filelog = filelog
1823 1831 if ctx:
1824 1832 self._changectx = ctx
1825 1833
1826 1834 def __nonzero__(self):
1827 1835 return True
1828 1836
1829 1837 __bool__ = __nonzero__
1830 1838
1831 1839 def linkrev(self):
1832 1840 # linked to self._changectx no matter if file is modified or not
1833 1841 return self.rev()
1834 1842
1835 1843 def parents(self):
1836 1844 '''return parent filectxs, following copies if necessary'''
1837 1845 def filenode(ctx, path):
1838 1846 return ctx._manifest.get(path, nullid)
1839 1847
1840 1848 path = self._path
1841 1849 fl = self._filelog
1842 1850 pcl = self._changectx._parents
1843 1851 renamed = self.renamed()
1844 1852
1845 1853 if renamed:
1846 1854 pl = [renamed + (None,)]
1847 1855 else:
1848 1856 pl = [(path, filenode(pcl[0], path), fl)]
1849 1857
1850 1858 for pc in pcl[1:]:
1851 1859 pl.append((path, filenode(pc, path), fl))
1852 1860
1853 1861 return [self._parentfilectx(p, fileid=n, filelog=l)
1854 1862 for p, n, l in pl if n != nullid]
1855 1863
1856 1864 def children(self):
1857 1865 return []
1858 1866
1859 1867 class workingfilectx(committablefilectx):
1860 1868 """A workingfilectx object makes access to data related to a particular
1861 1869 file in the working directory convenient."""
1862 1870 def __init__(self, repo, path, filelog=None, workingctx=None):
1863 1871 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1864 1872
1865 1873 @propertycache
1866 1874 def _changectx(self):
1867 1875 return workingctx(self._repo)
1868 1876
1869 1877 def data(self):
1870 1878 return self._repo.wread(self._path)
1871 1879 def renamed(self):
1872 1880 rp = self._repo.dirstate.copied(self._path)
1873 1881 if not rp:
1874 1882 return None
1875 1883 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1876 1884
1877 1885 def size(self):
1878 1886 return self._repo.wvfs.lstat(self._path).st_size
1879 1887 def date(self):
1880 1888 t, tz = self._changectx.date()
1881 1889 try:
1882 1890 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1883 1891 except OSError as err:
1884 1892 if err.errno != errno.ENOENT:
1885 1893 raise
1886 1894 return (t, tz)
1887 1895
1888 1896 def exists(self):
1889 1897 return self._repo.wvfs.exists(self._path)
1890 1898
1891 1899 def lexists(self):
1892 1900 return self._repo.wvfs.lexists(self._path)
1893 1901
1894 1902 def audit(self):
1895 1903 return self._repo.wvfs.audit(self._path)
1896 1904
1897 1905 def cmp(self, fctx):
1898 1906 """compare with other file context
1899 1907
1900 1908 returns True if different than fctx.
1901 1909 """
1902 1910 # fctx should be a filectx (not a workingfilectx)
1903 1911 # invert comparison to reuse the same code path
1904 1912 return fctx.cmp(self)
1905 1913
1906 1914 def remove(self, ignoremissing=False):
1907 1915 """wraps unlink for a repo's working directory"""
1908 1916 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1909 1917
1910 1918 def write(self, data, flags, backgroundclose=False):
1911 1919 """wraps repo.wwrite"""
1912 1920 self._repo.wwrite(self._path, data, flags,
1913 1921 backgroundclose=backgroundclose)
1914 1922
1915 1923 def setflags(self, l, x):
1916 1924 self._repo.wvfs.setflags(self._path, l, x)
1917 1925
1918 1926 class workingcommitctx(workingctx):
1919 1927 """A workingcommitctx object makes access to data related to
1920 1928 the revision being committed convenient.
1921 1929
1922 1930 This hides changes in the working directory, if they aren't
1923 1931 committed in this context.
1924 1932 """
1925 1933 def __init__(self, repo, changes,
1926 1934 text="", user=None, date=None, extra=None):
1927 1935 super(workingctx, self).__init__(repo, text, user, date, extra,
1928 1936 changes)
1929 1937
1930 1938 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1931 1939 unknown=False):
1932 1940 """Return matched files only in ``self._status``
1933 1941
1934 1942 Uncommitted files appear "clean" via this context, even if
1935 1943 they aren't actually so in the working directory.
1936 1944 """
1937 1945 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1938 1946 if clean:
1939 1947 clean = [f for f in self._manifest if f not in self._changedset]
1940 1948 else:
1941 1949 clean = []
1942 1950 return scmutil.status([f for f in self._status.modified if match(f)],
1943 1951 [f for f in self._status.added if match(f)],
1944 1952 [f for f in self._status.removed if match(f)],
1945 1953 [], [], [], clean)
1946 1954
1947 1955 @propertycache
1948 1956 def _changedset(self):
1949 1957 """Return the set of files changed in this context
1950 1958 """
1951 1959 changed = set(self._status.modified)
1952 1960 changed.update(self._status.added)
1953 1961 changed.update(self._status.removed)
1954 1962 return changed
1955 1963
1956 1964 def makecachingfilectxfn(func):
1957 1965 """Create a filectxfn that caches based on the path.
1958 1966
1959 1967 We can't use util.cachefunc because it uses all arguments as the cache
1960 1968 key and this creates a cycle since the arguments include the repo and
1961 1969 memctx.
1962 1970 """
1963 1971 cache = {}
1964 1972
1965 1973 def getfilectx(repo, memctx, path):
1966 1974 if path not in cache:
1967 1975 cache[path] = func(repo, memctx, path)
1968 1976 return cache[path]
1969 1977
1970 1978 return getfilectx
1971 1979
1972 1980 def memfilefromctx(ctx):
1973 1981 """Given a context return a memfilectx for ctx[path]
1974 1982
1975 1983 This is a convenience method for building a memctx based on another
1976 1984 context.
1977 1985 """
1978 1986 def getfilectx(repo, memctx, path):
1979 1987 fctx = ctx[path]
1980 1988 # this is weird but apparently we only keep track of one parent
1981 1989 # (why not only store that instead of a tuple?)
1982 1990 copied = fctx.renamed()
1983 1991 if copied:
1984 1992 copied = copied[0]
1985 1993 return memfilectx(repo, path, fctx.data(),
1986 1994 islink=fctx.islink(), isexec=fctx.isexec(),
1987 1995 copied=copied, memctx=memctx)
1988 1996
1989 1997 return getfilectx
1990 1998
1991 1999 def memfilefrompatch(patchstore):
1992 2000 """Given a patch (e.g. patchstore object) return a memfilectx
1993 2001
1994 2002 This is a convenience method for building a memctx based on a patchstore.
1995 2003 """
1996 2004 def getfilectx(repo, memctx, path):
1997 2005 data, mode, copied = patchstore.getfile(path)
1998 2006 if data is None:
1999 2007 return None
2000 2008 islink, isexec = mode
2001 2009 return memfilectx(repo, path, data, islink=islink,
2002 2010 isexec=isexec, copied=copied,
2003 2011 memctx=memctx)
2004 2012
2005 2013 return getfilectx
2006 2014
2007 2015 class memctx(committablectx):
2008 2016 """Use memctx to perform in-memory commits via localrepo.commitctx().
2009 2017
2010 2018 Revision information is supplied at initialization time, while the
2011 2019 related files' data is made available through a callback
2012 2020 mechanism. 'repo' is the current localrepo, 'parents' is a
2013 2021 sequence of two parent revision identifiers (pass None for every
2014 2022 missing parent), 'text' is the commit message and 'files' lists
2015 2023 names of files touched by the revision (normalized and relative to
2016 2024 repository root).
2017 2025
2018 2026 filectxfn(repo, memctx, path) is a callable receiving the
2019 2027 repository, the current memctx object and the normalized path of
2020 2028 requested file, relative to repository root. It is fired by the
2021 2029 commit function for every file in 'files', but calls order is
2022 2030 undefined. If the file is available in the revision being
2023 2031 committed (updated or added), filectxfn returns a memfilectx
2024 2032 object. If the file was removed, filectxfn returns None for recent
2025 2033 Mercurial. Moved files are represented by marking the source file
2026 2034 removed and the new file added with copy information (see
2027 2035 memfilectx).
2028 2036
2029 2037 'user' is the committer name and defaults to the current
2030 2038 repository username; 'date' is the commit date in any format
2031 2039 supported by util.parsedate() and defaults to the current date;
2032 2040 'extra' is a dictionary of metadata, or is left empty.
2033 2041 """
2034 2042
2035 2043 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2036 2044 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2037 2045 # this field to determine what to do in filectxfn.
2038 2046 _returnnoneformissingfiles = True
2039 2047
2040 2048 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2041 2049 date=None, extra=None, branch=None, editor=False):
2042 2050 super(memctx, self).__init__(repo, text, user, date, extra)
2043 2051 self._rev = None
2044 2052 self._node = None
2045 2053 parents = [(p or nullid) for p in parents]
2046 2054 p1, p2 = parents
2047 2055 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2048 2056 files = sorted(set(files))
2049 2057 self._files = files
2050 2058 if branch is not None:
2051 2059 self._extra['branch'] = encoding.fromlocal(branch)
2052 2060 self.substate = {}
2053 2061
2054 2062 if isinstance(filectxfn, patch.filestore):
2055 2063 filectxfn = memfilefrompatch(filectxfn)
2056 2064 elif not callable(filectxfn):
2057 2065 # if store is not callable, wrap it in a function
2058 2066 filectxfn = memfilefromctx(filectxfn)
2059 2067
2060 2068 # memoizing increases performance for e.g. vcs convert scenarios.
2061 2069 self._filectxfn = makecachingfilectxfn(filectxfn)
2062 2070
2063 2071 if editor:
2064 2072 self._text = editor(self._repo, self, [])
2065 2073 self._repo.savecommitmessage(self._text)
2066 2074
2067 2075 def filectx(self, path, filelog=None):
2068 2076 """get a file context from the working directory
2069 2077
2070 2078 Returns None if file doesn't exist and should be removed."""
2071 2079 return self._filectxfn(self._repo, self, path)
2072 2080
2073 2081 def commit(self):
2074 2082 """commit context to the repo"""
2075 2083 return self._repo.commitctx(self)
2076 2084
2077 2085 @propertycache
2078 2086 def _manifest(self):
2079 2087 """generate a manifest based on the return values of filectxfn"""
2080 2088
2081 2089 # keep this simple for now; just worry about p1
2082 2090 pctx = self._parents[0]
2083 2091 man = pctx.manifest().copy()
2084 2092
2085 2093 for f in self._status.modified:
2086 2094 p1node = nullid
2087 2095 p2node = nullid
2088 2096 p = pctx[f].parents() # if file isn't in pctx, check p2?
2089 2097 if len(p) > 0:
2090 2098 p1node = p[0].filenode()
2091 2099 if len(p) > 1:
2092 2100 p2node = p[1].filenode()
2093 2101 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2094 2102
2095 2103 for f in self._status.added:
2096 2104 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2097 2105
2098 2106 for f in self._status.removed:
2099 2107 if f in man:
2100 2108 del man[f]
2101 2109
2102 2110 return man
2103 2111
2104 2112 @propertycache
2105 2113 def _status(self):
2106 2114 """Calculate exact status from ``files`` specified at construction
2107 2115 """
2108 2116 man1 = self.p1().manifest()
2109 2117 p2 = self._parents[1]
2110 2118 # "1 < len(self._parents)" can't be used for checking
2111 2119 # existence of the 2nd parent, because "memctx._parents" is
2112 2120 # explicitly initialized with a list whose length is always 2.
2113 2121 if p2.node() != nullid:
2114 2122 man2 = p2.manifest()
2115 2123 managing = lambda f: f in man1 or f in man2
2116 2124 else:
2117 2125 managing = lambda f: f in man1
2118 2126
2119 2127 modified, added, removed = [], [], []
2120 2128 for f in self._files:
2121 2129 if not managing(f):
2122 2130 added.append(f)
2123 2131 elif self[f]:
2124 2132 modified.append(f)
2125 2133 else:
2126 2134 removed.append(f)
2127 2135
2128 2136 return scmutil.status(modified, added, removed, [], [], [], [])
2129 2137
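# A minimal usage sketch for memctx, assuming an existing localrepo object
# 'repo'; the helper and file names are invented for illustration and are not
# part of this change:
def _memctxexample(repo):
    def getfilectx(repo, memctx, path):
        # returning None here instead would mark 'path' as removed
        return memfilectx(repo, path, 'hello\n', islink=False, isexec=False,
                          memctx=memctx)
    mctx = memctx(repo, [repo['.'].node(), None], 'example in-memory commit',
                  ['hello.txt'], getfilectx, user='someone@example.com')
    return mctx.commit()  # node of the newly created changeset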
2130 2138 class memfilectx(committablefilectx):
2131 2139 """memfilectx represents an in-memory file to commit.
2132 2140
2133 2141 See memctx and committablefilectx for more details.
2134 2142 """
2135 2143 def __init__(self, repo, path, data, islink=False,
2136 2144 isexec=False, copied=None, memctx=None):
2137 2145 """
2138 2146 path is the normalized file path relative to repository root.
2139 2147 data is the file content as a string.
2140 2148 islink is True if the file is a symbolic link.
2141 2149 isexec is True if the file is executable.
2142 2150 copied is the source file path if current file was copied in the
2143 2151 revision being committed, or None."""
2144 2152 super(memfilectx, self).__init__(repo, path, None, memctx)
2145 2153 self._data = data
2146 2154 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2147 2155 self._copied = None
2148 2156 if copied:
2149 2157 self._copied = (copied, nullid)
2150 2158
2151 2159 def data(self):
2152 2160 return self._data
2153 2161
2154 2162 def remove(self, ignoremissing=False):
2155 2163 """wraps unlink for a repo's working directory"""
2156 2164 # need to figure out what to do here
2157 2165 del self._changectx[self._path]
2158 2166
2159 2167 def write(self, data, flags):
2160 2168 """wraps repo.wwrite"""
2161 2169 self._data = data
2162 2170
2163 2171 class overlayfilectx(committablefilectx):
2164 2172 """Like memfilectx but take an original filectx and optional parameters to
2165 2173 override parts of it. This is useful when fctx.data() is expensive (e.g.
2166 2174 flag processor is expensive) and raw data, flags, and filenode could be
2167 2175 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2168 2176 """
2169 2177
2170 2178 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2171 2179 copied=None, ctx=None):
2172 2180 """originalfctx: filecontext to duplicate
2173 2181
2174 2182 datafunc: None or a function to override data (file content). It is
2175 2183 a function so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2176 2184
2177 2185 copied could be (path, rev), or False. copied could also be just path,
2178 2186 and will be converted to (path, nullid). This simplifies some callers.
2179 2187 """
2180 2188
2181 2189 if path is None:
2182 2190 path = originalfctx.path()
2183 2191 if ctx is None:
2184 2192 ctx = originalfctx.changectx()
2185 2193 ctxmatch = lambda: True
2186 2194 else:
2187 2195 ctxmatch = lambda: ctx == originalfctx.changectx()
2188 2196
2189 2197 repo = originalfctx.repo()
2190 2198 flog = originalfctx.filelog()
2191 2199 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2192 2200
2193 2201 if copied is None:
2194 2202 copied = originalfctx.renamed()
2195 2203 copiedmatch = lambda: True
2196 2204 else:
2197 2205 if copied and not isinstance(copied, tuple):
2198 2206 # repo._filecommit will recalculate copyrev so nullid is okay
2199 2207 copied = (copied, nullid)
2200 2208 copiedmatch = lambda: copied == originalfctx.renamed()
2201 2209
2202 2210 # When data, copied (could affect data), ctx (could affect filelog
2203 2211 # parents) are not overridden, rawdata, rawflags, and filenode may be
2204 2212 # reused (repo._filecommit should double check filelog parents).
2205 2213 #
2206 2214 # path, flags are not hashed in filelog (but in manifestlog) so they do
2207 2215 # not affect reusability here.
2208 2216 #
2209 2217 # If ctx or copied is overridden to a same value with originalfctx,
2210 2218 # still consider it reusable. originalfctx.renamed() may be a bit
2211 2219 # expensive so it's not called unless necessary. Assuming datafunc is
2212 2220 # always expensive, do not call it for this "reusable" test.
2213 2221 reusable = datafunc is None and ctxmatch() and copiedmatch()
2214 2222
2215 2223 if datafunc is None:
2216 2224 datafunc = originalfctx.data
2217 2225 if flags is None:
2218 2226 flags = originalfctx.flags()
2219 2227
2220 2228 self._datafunc = datafunc
2221 2229 self._flags = flags
2222 2230 self._copied = copied
2223 2231
2224 2232 if reusable:
2225 2233 # copy extra fields from originalfctx
2226 2234 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2227 2235 for attr in attrs:
2228 2236 if util.safehasattr(originalfctx, attr):
2229 2237 setattr(self, attr, getattr(originalfctx, attr))
2230 2238
2231 2239 def data(self):
2232 2240 return self._datafunc()
2233 2241
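# Illustrative helper (name invented, not part of this change): reuse an
# existing filectx while overriding only its flags, e.g. for a mode-only
# amend. Since data, copied, and ctx are left alone, the original's raw data
# and filenode stay reusable, as described in the comment above.
def _overlayflagsexample(fctx):
    return overlayfilectx(fctx, flags='x')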
2234 2242 class metadataonlyctx(committablectx):
2235 2243 """Like memctx but it's reusing the manifest of different commit.
2236 2244 Intended to be used by lightweight operations that are creating
2237 2245 metadata-only changes.
2238 2246
2239 2247 Revision information is supplied at initialization time. 'repo' is the
2240 2248 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2241 2249 'parents' is a sequence of two parent revision identifiers (pass None for
2242 2250 every missing parent), 'text' is the commit message.
2243 2251
2244 2252 'user' is the committer name and defaults to the current repository
2245 2253 username; 'date' is the commit date in any format supported by
2246 2254 util.parsedate() and defaults to the current date; 'extra' is a dictionary of
2247 2255 metadata, or is left empty.
2248 2256 """
2249 2257 def __new__(cls, repo, originalctx, *args, **kwargs):
2250 2258 return super(metadataonlyctx, cls).__new__(cls, repo)
2251 2259
2252 2260 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2253 2261 extra=None, editor=False):
2254 2262 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2255 2263 self._rev = None
2256 2264 self._node = None
2257 2265 self._originalctx = originalctx
2258 2266 self._manifestnode = originalctx.manifestnode()
2259 2267 parents = [(p or nullid) for p in parents]
2260 2268 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2261 2269
2262 2270 # sanity check to ensure that the reused manifest parents are
2263 2271 # manifests of our commit parents
2264 2272 mp1, mp2 = self.manifestctx().parents
2265 2273 if p1 != nullid and p1.manifestnode() != mp1:
2266 2274 raise RuntimeError('can\'t reuse the manifest: '
2267 2275 'its p1 doesn\'t match the new ctx p1')
2268 2276 if p2 != nullid and p2.manifestnode() != mp2:
2269 2277 raise RuntimeError('can\'t reuse the manifest: '
2270 2278 'its p2 doesn\'t match the new ctx p2')
2271 2279
2272 2280 self._files = originalctx.files()
2273 2281 self.substate = {}
2274 2282
2275 2283 if editor:
2276 2284 self._text = editor(self._repo, self, [])
2277 2285 self._repo.savecommitmessage(self._text)
2278 2286
2279 2287 def manifestnode(self):
2280 2288 return self._manifestnode
2281 2289
2282 2290 @property
2283 2291 def _manifestctx(self):
2284 2292 return self._repo.manifestlog[self._manifestnode]
2285 2293
2286 2294 def filectx(self, path, filelog=None):
2287 2295 return self._originalctx.filectx(path, filelog=filelog)
2288 2296
2289 2297 def commit(self):
2290 2298 """commit context to the repo"""
2291 2299 return self._repo.commitctx(self)
2292 2300
2293 2301 @property
2294 2302 def _manifest(self):
2295 2303 return self._originalctx.manifest()
2296 2304
2297 2305 @propertycache
2298 2306 def _status(self):
2299 2307 """Calculate exact status from ``files`` specified in the ``origctx``
2300 2308 and parents manifests.
2301 2309 """
2302 2310 man1 = self.p1().manifest()
2303 2311 p2 = self._parents[1]
2304 2312 # "1 < len(self._parents)" can't be used for checking
2305 2313 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2306 2314 # explicitly initialized with a list whose length is always 2.
2307 2315 if p2.node() != nullid:
2308 2316 man2 = p2.manifest()
2309 2317 managing = lambda f: f in man1 or f in man2
2310 2318 else:
2311 2319 managing = lambda f: f in man1
2312 2320
2313 2321 modified, added, removed = [], [], []
2314 2322 for f in self._files:
2315 2323 if not managing(f):
2316 2324 added.append(f)
2317 2325 elif self[f]:
2318 2326 modified.append(f)
2319 2327 else:
2320 2328 removed.append(f)
2321 2329
2322 2330 return scmutil.status(modified, added, removed, [], [], [], [])
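# A minimal usage sketch for metadataonlyctx, assuming 'repo' and 'ctx' are an
# existing localrepo and changectx; the helper name is invented for
# illustration and is not part of this change:
def _metadataonlyexample(repo, ctx, newtext):
    # recreate 'ctx' with a new commit message, reusing its manifest
    new = metadataonlyctx(repo, ctx, (ctx.p1().node(), ctx.p2().node()),
                          newtext, user=ctx.user(), date=ctx.date(),
                          extra=ctx.extra())
    return new.commit()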
@@ -1,246 +1,251 b''
1 1 $ hg init a
2 2 $ cd a
3 3 $ echo a > a
4 4 $ hg add -n
5 5 adding a
6 6 $ hg st
7 7 ? a
8 8 $ hg add
9 9 adding a
10 10 $ hg st
11 11 A a
12 12 $ hg forget a
13 13 $ hg add
14 14 adding a
15 15 $ hg st
16 16 A a
17 $ mkdir dir
18 $ cd dir
19 $ hg add ../a
20 ../a already tracked!
21 $ cd ..
17 22
18 23 $ echo b > b
19 24 $ hg add -n b
20 25 $ hg st
21 26 A a
22 27 ? b
23 28 $ hg add b
24 29 $ hg st
25 30 A a
26 31 A b
27 32
28 33 should fail
29 34
30 35 $ hg add b
31 36 b already tracked!
32 37 $ hg st
33 38 A a
34 39 A b
35 40
36 41 #if no-windows
37 42 $ echo foo > con.xml
38 43 $ hg --config ui.portablefilenames=jump add con.xml
39 44 abort: ui.portablefilenames value is invalid ('jump')
40 45 [255]
41 46 $ hg --config ui.portablefilenames=abort add con.xml
42 47 abort: filename contains 'con', which is reserved on Windows: 'con.xml'
43 48 [255]
44 49 $ hg st
45 50 A a
46 51 A b
47 52 ? con.xml
48 53 $ hg add con.xml
49 54 warning: filename contains 'con', which is reserved on Windows: 'con.xml'
50 55 $ hg st
51 56 A a
52 57 A b
53 58 A con.xml
54 59 $ hg forget con.xml
55 60 $ rm con.xml
56 61 #endif
57 62
58 63 #if eol-in-paths
59 64 $ echo bla > 'hello:world'
60 65 $ hg --config ui.portablefilenames=abort add
61 66 adding hello:world
62 67 abort: filename contains ':', which is reserved on Windows: 'hello:world'
63 68 [255]
64 69 $ hg st
65 70 A a
66 71 A b
67 72 ? hello:world
68 73 $ hg --config ui.portablefilenames=ignore add
69 74 adding hello:world
70 75 $ hg st
71 76 A a
72 77 A b
73 78 A hello:world
74 79 #endif
75 80
76 81 $ hg ci -m 0 --traceback
77 82
78 83 $ hg log -r "heads(. or wdir() & file('**'))"
79 84 changeset: 0:* (glob)
80 85 tag: tip
81 86 user: test
82 87 date: Thu Jan 01 00:00:00 1970 +0000
83 88 summary: 0
84 89
85 90 should fail
86 91
87 92 $ hg add a
88 93 a already tracked!
89 94
90 95 $ echo aa > a
91 96 $ hg ci -m 1
92 97 $ hg up 0
93 98 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 99 $ echo aaa > a
95 100 $ hg ci -m 2
96 101 created new head
97 102
98 103 $ hg merge
99 104 merging a
100 105 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
101 106 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
102 107 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
103 108 [1]
104 109 $ hg st
105 110 M a
106 111 ? a.orig
107 112
108 113 wdir doesn't cause a crash, and can be dynamically selected if dirty
109 114
110 115 $ hg log -r "heads(. or wdir() & file('**'))"
111 116 changeset: 2147483647:ffffffffffff
112 117 parent: 2:* (glob)
113 118 parent: 1:* (glob)
114 119 user: test
115 120 date: * (glob)
116 121
117 122 should fail
118 123
119 124 $ hg add a
120 125 a already tracked!
121 126 $ hg st
122 127 M a
123 128 ? a.orig
124 129 $ hg resolve -m a
125 130 (no more unresolved files)
126 131 $ hg ci -m merge
127 132
128 133 Issue683: peculiarity with hg revert of a removed then added file
129 134
130 135 $ hg forget a
131 136 $ hg add a
132 137 $ hg st
133 138 ? a.orig
134 139 $ hg rm a
135 140 $ hg st
136 141 R a
137 142 ? a.orig
138 143 $ echo a > a
139 144 $ hg add a
140 145 $ hg st
141 146 M a
142 147 ? a.orig
143 148
144 149 Forgotten file can be added back (as either clean or modified)
145 150
146 151 $ hg forget b
147 152 $ hg add b
148 153 $ hg st -A b
149 154 C b
150 155 $ hg forget b
151 156 $ echo modified > b
152 157 $ hg add b
153 158 $ hg st -A b
154 159 M b
155 160 $ hg revert -qC b
156 161
157 162 $ hg add c && echo "unexpected addition of missing file"
158 163 c: * (glob)
159 164 [1]
160 165 $ echo c > c
161 166 $ hg add d c && echo "unexpected addition of missing file"
162 167 d: * (glob)
163 168 [1]
164 169 $ hg st
165 170 M a
166 171 A c
167 172 ? a.orig
168 173 $ hg up -C
169 174 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 175
171 176 forget and get should have the right order: added but missing dir should be
172 177 forgotten before file with same name is added
173 178
174 179 $ echo file d > d
175 180 $ hg add d
176 181 $ hg ci -md
177 182 $ hg rm d
178 183 $ mkdir d
179 184 $ echo a > d/a
180 185 $ hg add d/a
181 186 $ rm -r d
182 187 $ hg up -C
183 188 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
184 189 $ cat d
185 190 file d
186 191
187 192 Test that adding a directory doesn't require case matching (issue4578)
188 193 #if icasefs
189 194 $ mkdir -p CapsDir1/CapsDir
190 195 $ echo abc > CapsDir1/CapsDir/AbC.txt
191 196 $ mkdir CapsDir1/CapsDir/SubDir
192 197 $ echo def > CapsDir1/CapsDir/SubDir/Def.txt
193 198
194 199 $ hg add capsdir1/capsdir
195 200 adding CapsDir1/CapsDir/AbC.txt (glob)
196 201 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
197 202
198 203 $ hg forget capsdir1/capsdir/abc.txt
199 204
200 205 $ hg forget capsdir1/capsdir
201 206 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
202 207
203 208 $ hg add capsdir1
204 209 adding CapsDir1/CapsDir/AbC.txt (glob)
205 210 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
206 211
207 212 $ hg ci -m "AbCDef" capsdir1/capsdir
208 213
209 214 $ hg status -A capsdir1/capsdir
210 215 C CapsDir1/CapsDir/AbC.txt
211 216 C CapsDir1/CapsDir/SubDir/Def.txt
212 217
213 218 $ hg files capsdir1/capsdir
214 219 CapsDir1/CapsDir/AbC.txt (glob)
215 220 CapsDir1/CapsDir/SubDir/Def.txt (glob)
216 221
217 222 $ echo xyz > CapsDir1/CapsDir/SubDir/Def.txt
218 223 $ hg ci -m xyz capsdir1/capsdir/subdir/def.txt
219 224
220 225 $ hg revert -r '.^' capsdir1/capsdir
221 226 reverting CapsDir1/CapsDir/SubDir/Def.txt (glob)
222 227
223 228 The conditional tests above mean the hash on the diff line differs on Windows
224 229 and OS X
225 230 $ hg diff capsdir1/capsdir
226 231 diff -r * CapsDir1/CapsDir/SubDir/Def.txt (glob)
227 232 --- a/CapsDir1/CapsDir/SubDir/Def.txt Thu Jan 01 00:00:00 1970 +0000
228 233 +++ b/CapsDir1/CapsDir/SubDir/Def.txt * (glob)
229 234 @@ -1,1 +1,1 @@
230 235 -xyz
231 236 +def
232 237
233 238 $ hg mv CapsDir1/CapsDir/abc.txt CapsDir1/CapsDir/ABC.txt
234 239 $ hg ci -m "case changing rename" CapsDir1/CapsDir/AbC.txt CapsDir1/CapsDir/ABC.txt
235 240
236 241 $ hg status -A capsdir1/capsdir
237 242 M CapsDir1/CapsDir/SubDir/Def.txt
238 243 C CapsDir1/CapsDir/ABC.txt
239 244
240 245 $ hg remove -f 'glob:**.txt' -X capsdir1/capsdir
241 246 $ hg remove -f 'glob:**.txt' -I capsdir1/capsdir
242 247 removing CapsDir1/CapsDir/ABC.txt (glob)
243 248 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
244 249 #endif
245 250
246 251 $ cd ..